// Note: porting this file to C++ is a work in progress

#ifdef _WIN32
#    define WIN32_LEAN_AND_MEAN
#    ifndef NOMINMAX
#        define NOMINMAX
#    endif
#    include <windows.h>
#endif

#include "ggml-backend.h"
#include "ggml-backend-impl.h"
#include "ggml-alloc.h"
#include "ggml-impl.h"

#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>

#ifdef __APPLE__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

// backend buffer type

const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name(buft);
}

ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    if (size == 0) {
        // return a dummy buffer for zero-sized allocations
        return ggml_backend_buffer_init(buft, {}, NULL, 0);
    }
    return buft->iface.alloc_buffer(buft, size);
}

size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_alignment(buft);
}

size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
    // get_max_size is optional, defaults to SIZE_MAX
    if (buft->iface.get_max_size) {
        return buft->iface.get_max_size(buft);
    }
    return SIZE_MAX;
}

size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
    // get_alloc_size is optional, defaults to ggml_nbytes
    if (buft->iface.get_alloc_size) {
        size_t size = buft->iface.get_alloc_size(buft, tensor);
        assert(size >= ggml_nbytes(tensor));
        return size;
    }
    return ggml_nbytes(tensor);
}

bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
    if (buft->iface.is_host) {
        return buft->iface.is_host(buft);
    }
    return false;
}

ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) {
    return buft->device;
}
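
// Illustrative usage sketch: a buffer type describes how a backend allocates memory.
// Assuming `buft` was obtained from a device (e.g. via ggml_backend_dev_buffer_type),
// a typical allocation might look like:
//
//     ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 16u*1024*1024);
//     size_t align = ggml_backend_buft_get_alignment(buft); // respect this when sub-allocating
//     ...
//     ggml_backend_buffer_free(buf);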

// backend buffer

ggml_backend_buffer_t ggml_backend_buffer_init(
        ggml_backend_buffer_type_t buft,
        struct ggml_backend_buffer_i iface,
        void * context,
        size_t size) {
    ggml_backend_buffer_t buffer = new ggml_backend_buffer {
        /* .interface = */ iface,
        /* .buft      = */ buft,
        /* .context   = */ context,
        /* .size      = */ size,
        /* .usage     = */ GGML_BACKEND_BUFFER_USAGE_ANY
    };
    return buffer;
}

const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer));
}

void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
    if (buffer == NULL) {
        return;
    }
    if (buffer->iface.free_buffer != NULL) {
        buffer->iface.free_buffer(buffer);
    }
    delete buffer;
}

size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
    return buffer->size;
}

void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
    // get_base is optional if the buffer is zero-sized
    if (buffer->size == 0) {
        return NULL;
    }
    void * base = buffer->iface.get_base(buffer);
    GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");
    return base;
}

void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    // init_tensor is optional
    if (buffer->iface.init_tensor) {
        buffer->iface.init_tensor(buffer, tensor);
    }
}

void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    // clear is optional if the buffer is zero-sized
    if (buffer->size == 0) {
        return;
    }
    buffer->iface.clear(buffer, value);
}

size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
}

bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
}

void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    buffer->usage = usage;

    // FIXME: add a generic callback to the buffer interface
    if (ggml_backend_buffer_is_multi_buffer(buffer)) {
        ggml_backend_multi_buffer_set_usage(buffer, usage);
    }
}

enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) {
    return buffer->usage;
}

ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
    return buffer->buft;
}

void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
    if (buffer->iface.reset) {
        buffer->iface.reset(buffer);
    }
}

bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) {
    ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
    if (dst_buf->iface.cpy_tensor) {
        return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
    }
    return false;
}

// backend

ggml_guid_t ggml_backend_guid(ggml_backend_t backend) {
    if (backend == NULL) {
        return NULL;
    }
    return backend->guid;
}

const char * ggml_backend_name(ggml_backend_t backend) {
    if (backend == NULL) {
        return "NULL";
    }
    return backend->iface.get_name(backend);
}

void ggml_backend_free(ggml_backend_t backend) {
    if (backend == NULL) {
        return;
    }
    backend->iface.free(backend);
}

ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) {
    return ggml_backend_dev_buffer_type(backend->device);
}

ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
    return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size);
}

size_t ggml_backend_get_alignment(ggml_backend_t backend) {
    return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend));
}

size_t ggml_backend_get_max_size(ggml_backend_t backend) {
    return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend));
}

void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    if (backend->iface.set_tensor_async == NULL) {
        ggml_backend_tensor_set(tensor, data, offset, size);
    } else {
        backend->iface.set_tensor_async(backend, tensor, data, offset, size);
    }
}

void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    if (backend->iface.get_tensor_async == NULL) {
        ggml_backend_tensor_get(tensor, data, offset, size);
    } else {
        backend->iface.get_tensor_async(backend, tensor, data, offset, size);
    }
}

void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor);
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    if (size == 0) {
        return;
    }

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    buf->iface.set_tensor(buf, tensor, data, offset, size);
}

void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor);
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    if (size == 0) {
        return;
    }

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    buf->iface.get_tensor(buf, tensor, data, offset, size);
}

void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    if (size == 0) {
        return;
    }

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
    GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer");

    buf->iface.memset_tensor(buf, tensor, value, offset, size);
}
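
// Illustrative usage sketch: copying host data into a backend tensor and reading it back,
// assuming `t` is a tensor that was allocated in a backend buffer:
//
//     std::vector<float> host(ggml_nelements(t), 0.0f);
//     ggml_backend_tensor_set(t, host.data(), 0, ggml_nbytes(t)); // host -> backend
//     ggml_backend_tensor_get(t, host.data(), 0, ggml_nbytes(t)); // backend -> host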

void ggml_backend_synchronize(ggml_backend_t backend) {
    if (backend->iface.synchronize == NULL) {
        return;
    }
    backend->iface.synchronize(backend);
}

ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    GGML_ASSERT(backend->iface.graph_plan_create != NULL);
    return backend->iface.graph_plan_create(backend, cgraph);
}

void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    GGML_ASSERT(backend->iface.graph_plan_free != NULL);
    backend->iface.graph_plan_free(backend, plan);
}

enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    GGML_ASSERT(backend->iface.graph_plan_compute != NULL);
    return backend->iface.graph_plan_compute(backend, plan);
}

enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph);
    ggml_backend_synchronize(backend);
    return err;
}

enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    return backend->iface.graph_compute(backend, cgraph);
}
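
// Note: the synchronous variant above is simply the async call followed by a full backend
// synchronization, so a typical caller only needs:
//
//     if (ggml_backend_graph_compute(backend, graph) != GGML_STATUS_SUCCESS) {
//         // handle the error
//     }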

bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    return ggml_backend_dev_supports_op(backend->device, op);
}

bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    return ggml_backend_dev_supports_buft(backend->device, buft);
}

bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    return ggml_backend_dev_offload_op(backend->device, op);
}

ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) {
    return backend->device;
}

// backend copy

static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    if (a->type != b->type) {
        return false;
    }
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        if (a->ne[i] != b->ne[i]) {
            return false;
        }
        if (a->nb[i] != b->nb[i]) {
            return false;
        }
    }
    return true;
}

void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    if (ggml_backend_buffer_is_host(src->buffer)) {
        ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
    } else if (ggml_backend_buffer_is_host(dst->buffer)) {
        ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
    } else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
#endif
        size_t nbytes = ggml_nbytes(src);
        void * data = malloc(nbytes);
        ggml_backend_tensor_get(src, data, 0, nbytes);
        ggml_backend_tensor_set(dst, data, 0, nbytes);
        free(data);
    }
}

void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    if (backend_dst->iface.cpy_tensor_async != NULL) {
        if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) {
            return;
        }
    }

    // an async copy would normally happen after all the queued operations on both backends are completed
    // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy
    ggml_backend_synchronize(backend_src);
    ggml_backend_synchronize(backend_dst);
    ggml_backend_tensor_copy(src, dst);
}
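
// Note: ggml_backend_tensor_copy falls through three strategies: direct host access on
// either side, the destination buffer's cpy_tensor callback, and finally a staged copy
// through a temporary host allocation (the "slow copy" debug warning above).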

// events

ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) {
    // null device is allowed for the transition period to the device interface
    if (device == NULL || device->iface.event_new == NULL) {
        return NULL;
    }
    return device->iface.event_new(device);
}

void ggml_backend_event_free(ggml_backend_event_t event) {
    if (event == NULL) {
        return;
    }
    event->device->iface.event_free(event->device, event);
}

void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) {
    GGML_ASSERT(backend->iface.event_record != NULL);
    backend->iface.event_record(backend, event);
}

void ggml_backend_event_synchronize(ggml_backend_event_t event) {
    GGML_ASSERT(event->device->iface.event_synchronize);
    event->device->iface.event_synchronize(event->device, event);
}

void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) {
    GGML_ASSERT(backend->iface.event_wait != NULL);
    backend->iface.event_wait(backend, event);
}

// Backend device

const char * ggml_backend_dev_name(ggml_backend_dev_t device) {
    return device->iface.get_name(device);
}

const char * ggml_backend_dev_description(ggml_backend_dev_t device) {
    return device->iface.get_description(device);
}

void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
    device->iface.get_memory(device, free, total);
}

enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) {
    return device->iface.get_type(device);
}

void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) {
    memset(props, 0, sizeof(*props));
    device->iface.get_props(device, props);
}

ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) {
    return device->reg;
}

ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) {
    return device->iface.init_backend(device, params);
}

ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) {
    return device->iface.get_buffer_type(device);
}

ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) {
    if (device->iface.get_host_buffer_type == NULL) {
        return NULL;
    }
    return device->iface.get_host_buffer_type(device);
}

ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) {
    return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size);
}

bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
    return device->iface.supports_op(device, op);
}

bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) {
    return device->iface.supports_buft(device, buft);
}

bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
    if (device->iface.offload_op != NULL) {
        return device->iface.offload_op(device, op);
    }
    return false;
}

// Backend (reg)

const char * ggml_backend_reg_name(ggml_backend_reg_t reg) {
    return reg->iface.get_name(reg);
}

size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) {
    return reg->iface.get_device_count(reg);
}

ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) {
    return reg->iface.get_device(reg, index);
}

void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    if (!reg->iface.get_proc_address) {
        return NULL;
    }
    return reg->iface.get_proc_address(reg, name);
}

// multi-buffer buffer

struct ggml_backend_multi_buffer_context {
    ggml_backend_buffer_t * buffers;
    size_t n_buffers;
};

static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_free(ctx->buffers[i]);
    }

    free(ctx->buffers);
    free(ctx);
}

static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_clear(ctx->buffers[i], value);
    }
}

static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = {
    /* .free_buffer   = */ ggml_backend_multi_buffer_free_buffer,
    /* .get_base      = */ NULL,
    /* .init_tensor   = */ NULL,
    /* .memset_tensor = */ NULL,
    /* .set_tensor    = */ NULL,
    /* .get_tensor    = */ NULL,
    /* .cpy_tensor    = */ NULL,
    /* .clear         = */ ggml_backend_multi_buffer_clear,
    /* .reset         = */ NULL,
};

ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context));
    ctx->n_buffers = n_buffers;
    ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));

    GGML_ASSERT(ctx->buffers != NULL);

    size_t total_size = 0;
    for (size_t i = 0; i < n_buffers; i++) {
        ctx->buffers[i] = buffers[i];
        total_size += ggml_backend_buffer_get_size(buffers[i]);
    }

    return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size);
}

bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) {
    return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer;
}

void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer));
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
    }
}
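
// Note: a multi-buffer groups several backend buffers behind a single handle so that they
// can be freed, cleared, or tagged with a usage in one call; it only implements the
// interface entries that make sense for the group (free_buffer and clear), everything
// else is left NULL.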

// creates a copy of the tensor with the same memory layout
static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
    struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        dup->nb[i] = tensor->nb[i];
    }
    return dup;
}

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

// scheduler

#ifndef GGML_SCHED_MAX_BACKENDS
#define GGML_SCHED_MAX_BACKENDS 16
#endif

#ifndef GGML_SCHED_MAX_SPLIT_INPUTS
#define GGML_SCHED_MAX_SPLIT_INPUTS GGML_MAX_SRC
#endif

#ifndef GGML_SCHED_MAX_COPIES
#define GGML_SCHED_MAX_COPIES 4
#endif

struct ggml_backend_sched_split {
    int backend_id;
    int i_start;
    int i_end;
    struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_inputs;
    // graph view of this split
    struct ggml_cgraph graph;
};

struct ggml_backend_sched {
    bool is_reset; // true if the scheduler has been reset since the last graph split
    bool is_alloc;

    int n_backends;

    ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS];
    ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS];
    ggml_gallocr_t galloc;

    // hash map of the nodes in the graph
    struct ggml_hash_set hash_set;
    int * hv_tensor_backend_ids;            // [hash_set.size]
    struct ggml_tensor ** hv_tensor_copies; // [hash_set.size][n_backends][n_copies]

    int * node_backend_ids; // [graph_size]
    int * leaf_backend_ids; // [graph_size]

    int * prev_node_backend_ids; // [graph_size]
    int * prev_leaf_backend_ids; // [graph_size]

    // copy of the graph with modified inputs
    struct ggml_cgraph graph;

    // graph splits
    struct ggml_backend_sched_split * splits;
    int n_splits;
    int splits_capacity;

    // pipeline parallelism support
    int n_copies;
    int cur_copy;
    ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES];
    struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_graph_inputs;

    struct ggml_context * ctx;

    ggml_backend_sched_eval_callback callback_eval;
    void * callback_eval_user_data;

    char * context_buffer;
    size_t context_buffer_size;

    int debug;
};

#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
#define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
#define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
#define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id)
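
// Indexing note: hv_tensor_copies is a flat array addressed as a 3D table of
// [hash_set.size][n_backends][n_copies], so the copy of tensor `id` for backend `b` and
// pipeline copy `c` lives at:
//
//     hv_tensor_copies[id*n_backends*n_copies + b*n_copies + c]
//
// which is what the tensor_id_copy() macro above expands to.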

// returns the priority of the backend, lower id is higher priority
static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) {
    for (int i = 0; i < sched->n_backends; i++) {
        if (sched->backends[i] == backend) {
            return i;
        }
    }
    return -1;
}

static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) {
    ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
    if (buffer == NULL) {
        return -1;
    }

    // find highest prio backend that supports the buffer type and the op
    for (int i = 0; i < sched->n_backends; i++) {
        if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) &&
            ggml_backend_supports_op(sched->backends[i], op)) {
            return i;
        }
    }

#ifndef NDEBUG
    GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
        __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name);
#endif

    return -1;
}

#if 0
#define GGML_SCHED_MAX_SPLITS_DEBUG 4096
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
#define GET_CAUSE(node) causes[hash_id(node)]
#else
#define SET_CAUSE(node, ...)
#define GET_CAUSE(node) ""
#endif

// returns the backend that should be used for the node based on the current locations
static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) {
    // assign pre-allocated nodes to their backend
    int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor);
    if (cur_backend_id != -1) {
        SET_CAUSE(tensor, "1.dst");
        return cur_backend_id;
    }

    // view_src
    if (tensor->view_src != NULL) {
        cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor);
        if (cur_backend_id != -1) {
            SET_CAUSE(tensor, "1.vsrc");
            return cur_backend_id;
        }
    }

    if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) {
        // since the tensor is pre-allocated, it cannot be moved to another backend
        GGML_ABORT("pre-allocated tensor (%s) in a backend that cannot run the operation", tensor->name);
    }

    // graph input
    if (tensor->flags & GGML_TENSOR_FLAG_INPUT) {
        cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU)
        SET_CAUSE(tensor, "1.inp");
        return cur_backend_id;
    }

    // operations with weights are preferably run on the same backend as the weights
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        const struct ggml_tensor * src = tensor->src[i];
        if (src == NULL) {
            continue;
        }
        // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
        // not an ideal solution
        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
            int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
            // check if a backend with higher prio wants to offload the op
            if (src_backend_id == sched->n_backends - 1) {
                for (int b = 0; b < src_backend_id; b++) {
                    if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                        SET_CAUSE(tensor, "1.off");
                        return b;
                    }
                }
            }
            SET_CAUSE(tensor, "1.wgt%d", i);
            return src_backend_id;
        }
    }

    return -1;
}
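
// Note: the function above tries, in order, the backend of the tensor's own buffer, the
// backend of its view source, the last (CPU) backend for graph inputs, and finally the
// backend holding a weight operand (optionally offloaded to a higher-priority backend);
// if none apply it returns -1 and the node is assigned later by the expansion passes.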

static char * fmt_size(size_t size) {
    static char buffer[128];
    if (size >= 1024*1024) {
        snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024);
    } else {
        snprintf(buffer, sizeof(buffer), "%zuK", size/1024);
    }
    return buffer;
}
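
// Note: fmt_size returns a pointer to a static buffer, so it is only suitable for
// single-threaded debug logging and each call overwrites the previous result.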

static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    int cur_split = 0;
    for (int i = 0; i < graph->n_nodes; i++) {
        if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
            ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
            GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
                sched->splits[cur_split].n_inputs);
            for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
                GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
                    fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
            }
            GGML_LOG_DEBUG("\n");
            cur_split++;
        }
        struct ggml_tensor * node = graph->nodes[i];
        if (ggml_is_view_op(node->op)) {
            continue;
        }
        if (sched->debug > 1) {
            ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node);
            GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
                fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node));
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * src = node->src[j];
                if (src == NULL) {
                    continue;
                }
                ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src);
                GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
                    fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
            }
            GGML_LOG_DEBUG("\n");
        }
    }
}

static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) {
    ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer;
    ggml_backend_buffer_type_t buft = NULL;

    if (buf) {
        // the tensor is already allocated
        buft = buf->buft;
    } else {
        // see if the tensor already has a backend assigned, and use the buffer type of that backend
        int tensor_backend_id = tensor_backend_id(t);
        if (tensor_backend_id == -1 && t->view_src) {
            tensor_backend_id = tensor_backend_id(t->view_src);
        }
        if (tensor_backend_id != -1) {
            buft = sched->bufts[tensor_backend_id];
        }
    }

    return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft);
}

static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) {
    if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) {
        *node_backend_id = cur_backend_id;
        SET_CAUSE(node, "2.sup");
    }
}

// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    // reset splits
    sched->n_splits = 0;
    sched->n_graph_inputs = 0;
    sched->is_reset = false;

    struct ggml_init_params params = {
        /* .mem_size   = */ sched->context_buffer_size,
        /* .mem_buffer = */ sched->context_buffer,
        /* .no_alloc   = */ true
    };

    ggml_free(sched->ctx);

    sched->ctx = ggml_init(params);
    if (sched->ctx == NULL) {
        GGML_ABORT("%s: failed to initialize context\n", __func__);
    }

    // pass 1: assign backends to ops with pre-allocated inputs
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        int * leaf_backend_id = &tensor_backend_id(leaf);
        // do not overwrite user assignments
        if (*leaf_backend_id == -1) {
            *leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf);
        }
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int * node_backend_id = &tensor_backend_id(node);
        // do not overwrite user assignments
        if (*node_backend_id == -1) {
            *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node);

#if 0
            // src
            if (node->op == GGML_OP_NONE) {
                continue;
            }

            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * src = node->src[j];
                if (src == NULL) {
                    continue;
                }
                int * src_backend_id = &tensor_backend_id(src);
                if (*src_backend_id == -1) {
                    *src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src);
                }
            }
#endif
        }
    }

    // pass 2: expand current backend assignments
    // assign the same backend to adjacent nodes
    // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend)
    // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
    // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later when the locations of its inputs are known
    // expand gpu down
    {
        int cur_backend_id = -1;
        for (int i = 0; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                if (*node_backend_id == sched->n_backends - 1) {
                    // skip cpu (lowest prio backend)
                    cur_backend_id = -1;
                } else {
                    cur_backend_id = *node_backend_id;
                }
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }
    // expand gpu up
    {
        int cur_backend_id = -1;
        for (int i = graph->n_nodes - 1; i >= 0; i--) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                if (*node_backend_id == sched->n_backends - 1) {
                    // skip cpu (lowest prio backend)
                    cur_backend_id = -1;
                } else {
                    cur_backend_id = *node_backend_id;
                }
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }
    // expand rest down
    {
        int cur_backend_id = -1;
        for (int i = 0; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                cur_backend_id = *node_backend_id;
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }
    // expand rest up
    {
        int cur_backend_id = -1;
        for (int i = graph->n_nodes - 1; i >= 0; i--) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                cur_backend_id = *node_backend_id;
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }

    // pass 3: upgrade nodes to higher prio backends with compatible buffer types
    // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there
    // however, we also need to verify that the sources are in compatible buffer types
    // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph
    //     however, this is slow to verify, so we have a more strict requirement that the buffer type is the same
    //     this is not uncommon since multiple backends can use host memory, with the same buffer type (eg. BLAS and CPU)
    // additionally, set remaining unassigned nodes to the backend with the most supported inputs
    // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        if (ggml_is_view_op(node->op)) {
            continue;
        }
        int * node_backend_id = &tensor_backend_id(node);
        if (*node_backend_id == -1) {
            // unassigned node: find the backend with the most supported inputs
            int n_supported_best = -1;
            for (int b = 0; b < sched->n_backends; b++) {
                if (ggml_backend_supports_op(sched->backends[b], node)) {
                    int n_supported = 0;
                    for (int j = 0; j < GGML_MAX_SRC; j++) {
                        struct ggml_tensor * src = node->src[j];
                        if (src == NULL) {
                            continue;
                        }
                        if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) {
                            n_supported++;
                        }
                    }
                    if (n_supported > n_supported_best) {
                        n_supported_best = n_supported;
                        *node_backend_id = b;
                        SET_CAUSE(node, "3.best");
                    }
                }
            }
        } else {
            // assigned node: upgrade to higher prio backend if possible
            for (int b = 0; b < *node_backend_id; b++) {
                if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) {
                    bool supported = true;
                    for (int j = 0; j < GGML_MAX_SRC; j++) {
                        struct ggml_tensor * src = node->src[j];
                        if (src == NULL) {
                            continue;
                        }
                        if (!ggml_backend_sched_buffer_supported(sched, src, b)) {
                            supported = false;
                            break;
                        }
                    }
                    if (supported) {
                        *node_backend_id = b;
                        SET_CAUSE(node, "3.upg");
                        break;
                    }
                }
            }
        }
    }

    // pass 4: assign backends to remaining src from dst and view_src
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int * cur_backend_id = &tensor_backend_id(node);
        if (node->view_src != NULL && *cur_backend_id == -1) {
            *cur_backend_id = tensor_backend_id(node->view_src);
            SET_CAUSE(node, "4.vsrc");
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            int * src_backend_id = &tensor_backend_id(src);
            if (*src_backend_id == -1) {
                if (src->view_src != NULL) {
                    // views are always on the same backend as the source
                    *src_backend_id = tensor_backend_id(src->view_src);
                    SET_CAUSE(src, "4.vsrc");
                } else {
                    *src_backend_id = *cur_backend_id;
                    SET_CAUSE(src, "4.cur");
                }
            }
        }
    }

    // pass 5: split graph, find tensors that need to be copied
    {
        int i_split = 0;
        struct ggml_backend_sched_split * split = &sched->splits[0];
        // find the backend of the first split, skipping view ops
        int i = 0;
        for (; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];
            if (!ggml_is_view_op(node->op)) {
                split->backend_id = tensor_backend_id(node);
                break;
            }
        }
        split->i_start = 0;
        split->n_inputs = 0;
        int cur_backend_id = split->backend_id;
        for (; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];

            if (ggml_is_view_op(node->op)) {
                continue;
            }

            const int node_backend_id = tensor_backend_id(node);

            assert(node_backend_id != -1); // all nodes should be assigned by now

            // check if we should start a new split based on the sources of the current node
            bool need_new_split = false;
            if (node_backend_id == cur_backend_id && split->n_inputs > 0) {
                for (int j = 0; j < GGML_MAX_SRC; j++) {
                    struct ggml_tensor * src = node->src[j];
                    if (src == NULL) {
                        continue;
                    }
                    // check if a weight is on a different and incompatible backend
                    // by starting a new split, the memory of the previously offloaded weights can be reused
                    if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
                        int src_backend_id = tensor_backend_id(src);
                        if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
                            need_new_split = true;
                            break;
                        }
                    }
                    // check if the split has too many inputs
                    // FIXME: count the number of inputs instead of only checking when full
                    if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) {
                        const size_t id = hash_id(src);
                        int src_backend_id = sched->hv_tensor_backend_ids[id];
                        bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id);
                        if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) {
                            need_new_split = true;
                            break;
                        }
                    }
                }
            }

            if (node_backend_id != cur_backend_id || need_new_split) {
                split->i_end = i;
                i_split++;
                if (i_split >= sched->splits_capacity) {
                    sched->splits_capacity *= 2;
                    sched->splits = (ggml_backend_sched_split *)
                        realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split));
                    GGML_ASSERT(sched->splits != NULL);
                }
                split = &sched->splits[i_split];
                split->backend_id = node_backend_id;
                split->i_start = i;
                split->n_inputs = 0;
                cur_backend_id = node_backend_id;
            }

            // find inputs that are not on the same backend
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * src = node->src[j];
                if (src == NULL) {
                    continue;
                }

                size_t src_id = hash_id(src);
                const int src_backend_id = sched->hv_tensor_backend_ids[src_id];
                assert(src_backend_id != -1); // all inputs should be assigned by now

                if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
                    if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) {
                        ggml_backend_t backend = sched->backends[src_backend_id];
                        for (int c = 0; c < sched->n_copies; c++) {
                            struct ggml_tensor * tensor_copy;
                            if (c == sched->cur_copy) {
                                tensor_copy = src; // use the original tensor as the current copy
                            } else {
                                tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
                                ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
                            }
                            if (sched->n_copies > 1) {
                                ggml_set_input(tensor_copy);
                                ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
                            }
                            tensor_id_copy(src_id, src_backend_id, c) = tensor_copy;
                            SET_CAUSE(tensor_copy, "4.cpy");
                        }
                        int n_graph_inputs = sched->n_graph_inputs++;
                        GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
                        sched->graph_inputs[n_graph_inputs] = src;
                    }
                }

                if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
                    // create a copy of the input in the split's backend
                    if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) {
                        ggml_backend_t backend = sched->backends[cur_backend_id];
                        for (int c = 0; c < sched->n_copies; c++) {
                            struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
                            ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
                            if (sched->n_copies > 1) {
                                ggml_set_input(tensor_copy);
                                ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
                            }
                            tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy;
                            SET_CAUSE(tensor_copy, "4.cpy");
                        }
                        int n_inputs = split->n_inputs++;
                        GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
                        split->inputs[n_inputs] = src;
                    }
                    node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy);
                }
            }
        }
        split->i_end = graph->n_nodes;
        sched->n_splits = i_split + 1;
    }

    if (sched->debug) {
        ggml_backend_sched_print_assignments(sched, graph);
    }
    // swap node_backend_ids and leaf_backend_ids with prevs
    {
        int * tmp = sched->node_backend_ids;
        sched->node_backend_ids = sched->prev_node_backend_ids;
        sched->prev_node_backend_ids = tmp;

        tmp = sched->leaf_backend_ids;
        sched->leaf_backend_ids = sched->prev_leaf_backend_ids;
        sched->prev_leaf_backend_ids = tmp;
    }

    int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies;
    if (sched->graph.size < graph_size) {
        sched->graph.size = graph_size;
        sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *));
        sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *));
        GGML_ASSERT(sched->graph.nodes != NULL);
        GGML_ASSERT(sched->graph.leafs != NULL);
    }
    sched->graph.n_nodes = 0;
    sched->graph.n_leafs = 0;

    struct ggml_cgraph * graph_copy = &sched->graph;

    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &sched->splits[i];
        split->graph = ggml_graph_view(graph, split->i_start, split->i_end);

        // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
        for (int j = 0; j < split->n_inputs; j++) {
            assert(graph_copy->size > (graph_copy->n_nodes + 1));

            struct ggml_tensor * input = split->inputs[j];
            const size_t input_id = hash_id(input);
            struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy);

            // add a dependency to the input source so that it is not freed before the copy is done
            struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input);
            input_dep->src[0] = input;
            sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id];
            graph_copy->nodes[graph_copy->n_nodes++] = input_dep;

            // add a dependency to the input copy so that it is allocated at the start of the split
            sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id;
            graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
        }

        for (int j = split->i_start; j < split->i_end; j++) {
            assert(graph_copy->size > graph_copy->n_nodes);
            sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]);
            graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
        }
    }

    if (sched->n_copies > 1) {
        // add input copies as leafs so that they are allocated first
        for (int i = 0; i < sched->n_graph_inputs; i++) {
            struct ggml_tensor * input = sched->graph_inputs[i];
            size_t id = hash_id(input);
            int backend_id = tensor_backend_id(input);
            for (int c = 0; c < sched->n_copies; c++) {
                struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
                sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
                assert(graph_copy->size > graph_copy->n_leafs);
                graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
            }
        }

        for (int i = 0; i < sched->n_splits; i++) {
            struct ggml_backend_sched_split * split = &sched->splits[i];
            int backend_id = split->backend_id;
            for (int j = 0; j < split->n_inputs; j++) {
                struct ggml_tensor * input = split->inputs[j];
                size_t id = hash_id(input);
                for (int c = 0; c < sched->n_copies; c++) {
                    struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
                    sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
                    assert(graph_copy->size > graph_copy->n_leafs);
                    graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
                }
            }
        }
    }

    // add leafs from the original graph
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf);
        assert(graph_copy->size > graph_copy->n_leafs);
        graph_copy->leafs[graph_copy->n_leafs++] = leaf;
    }
}

static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
    bool backend_ids_changed = false;
    for (int i = 0; i < sched->graph.n_nodes; i++) {
        if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] &&
            sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) {
            backend_ids_changed = true;
            break;
        }
    }
    if (!backend_ids_changed) {
        for (int i = 0; i < sched->graph.n_leafs; i++) {
            if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] &&
                sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) {
                backend_ids_changed = true;
                break;
            }
        }
    }

    // allocate graph
    if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
        // the re-allocation may cause the split inputs to be moved to a different address
        ggml_backend_sched_synchronize(sched);
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
#endif
        ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids);
        if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
            GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__);
            return false;
        }
    }

    return true;
}
static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
    struct ggml_backend_sched_split * splits = sched->splits;

    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &splits[i];
        int split_backend_id = split->backend_id;
        ggml_backend_t split_backend = sched->backends[split_backend_id];

        // copy the input tensors to the split backend
        for (int j = 0; j < split->n_inputs; j++) {
            ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[j]);
            struct ggml_tensor * input = split->inputs[j];
            struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);

            if (input->flags & GGML_TENSOR_FLAG_INPUT) {
                // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                    ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
                } else {
                    ggml_backend_synchronize(split_backend);
                }
                ggml_backend_tensor_copy(input, input_cpy);
            } else {
                // wait for the split backend to finish using the input before overwriting it
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                    ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
                } else {
                    ggml_backend_synchronize(split_backend);
                }
                // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
                // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
                if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
                    ggml_backend_synchronize(input_backend);
                    if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                        ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
                    } else {
                        ggml_backend_synchronize(split_backend);
                    }
                    ggml_backend_tensor_copy(input, input_cpy);
                }
            }
        }

        if (!sched->callback_eval) {
            enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph);
            if (ec != GGML_STATUS_SUCCESS) {
                return ec;
            }
        } else {
            // similar to ggml_backend_compare_graph_backend
            for (int j0 = 0; j0 < split->graph.n_nodes; j0++) {
                struct ggml_tensor * t = split->graph.nodes[j0];

                // check if the user needs data from this node
                bool need = sched->callback_eval(t, true, sched->callback_eval_user_data);

                int j1 = j0;

                // determine the range [j0, j1] of nodes that can be computed together
                while (!need && j1 < split->graph.n_nodes - 1) {
                    t = split->graph.nodes[++j1];
                    need = sched->callback_eval(t, true, sched->callback_eval_user_data);
                }

                struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);

                enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv);
                if (ec != GGML_STATUS_SUCCESS) {
                    return ec;
                }

                // TODO: pass backend to the callback, then the user can decide if they want to synchronize
                ggml_backend_synchronize(split_backend);

                if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
                    break;
                }

                j0 = j1;
            }
        }

        // record the event of this copy
        if (split->n_inputs > 0) {
            if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend);
            }
        }
    }

    sched->cur_copy = (sched->cur_copy + 1) % sched->n_copies;

    return GGML_STATUS_SUCCESS;
}
ggml_backend_sched_t ggml_backend_sched_new(
        ggml_backend_t * backends,
        ggml_backend_buffer_type_t * bufts,
        int n_backends,
        size_t graph_size,
        bool parallel) {
    GGML_ASSERT(n_backends > 0);
    GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
    GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);

    struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched));

    const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG");
    sched->debug = GGML_SCHED_DEBUG ? atoi(GGML_SCHED_DEBUG) : 0;
    sched->n_backends = n_backends;
    sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1;

    // initialize hash table
    // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead)
    sched->hash_set = ggml_hash_set_new(graph_size);
    sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
    sched->hv_tensor_copies = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));

    const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph
    const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2;
    sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
    sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
    sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
    sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));

    sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false);
    sched->context_buffer = (char *) malloc(sched->context_buffer_size);

    const int initial_splits_capacity = 16;
    sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0]));
    sched->splits_capacity = initial_splits_capacity;

    for (int b = 0; b < n_backends; b++) {
        sched->backends[b] = backends[b];
        sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]);
        GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b]));

        if (sched->n_copies > 1) {
            for (int c = 0; c < sched->n_copies; c++) {
                sched->events[b][c] = ggml_backend_event_new(backends[b]->device);
            }
        }
    }

    sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);

    ggml_backend_sched_reset(sched);

    return sched;
}
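
// Example (illustrative sketch, not referenced elsewhere in this file): typical construction of a
// scheduler. `backend_gpu` and `backend_cpu` stand for backends the caller has already initialized;
// the CPU backend must come last, as asserted above. Passing NULL for `bufts` selects each
// backend's default buffer type.
//
//   ggml_backend_t backends[2] = { backend_gpu, backend_cpu };
//   ggml_backend_sched_t sched = ggml_backend_sched_new(
//       backends, /* bufts */ NULL, /* n_backends */ 2,
//       /* graph_size */ GGML_DEFAULT_GRAPH_SIZE, /* parallel */ false);
//   ...
//   ggml_backend_sched_free(sched);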
void ggml_backend_sched_free(ggml_backend_sched_t sched) {
    if (sched == NULL) {
        return;
    }
    for (int b = 0; b < sched->n_backends; b++) {
        for (int c = 0; c < sched->n_copies; c++) {
            ggml_backend_event_free(sched->events[b][c]);
        }
    }
    ggml_gallocr_free(sched->galloc);
    ggml_free(sched->ctx);
    ggml_hash_set_free(&sched->hash_set);
    free(sched->splits);
    free(sched->hv_tensor_backend_ids);
    free(sched->hv_tensor_copies);
    free(sched->node_backend_ids);
    free(sched->leaf_backend_ids);
    free(sched->prev_node_backend_ids);
    free(sched->prev_leaf_backend_ids);
    free(sched->context_buffer);
    free(sched->graph.nodes);
    free(sched->graph.leafs);
    free(sched);
}

void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
    // reset state for the next run
    if (!sched->is_reset) {
        ggml_hash_set_reset(&sched->hash_set);
        memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
        memset(sched->hv_tensor_copies, 0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));
        sched->is_reset = true;
    }
    sched->is_alloc = false;
}
bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
    GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);

    ggml_backend_sched_split_graph(sched, measure_graph);

    ggml_backend_sched_synchronize(sched);

    if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) {
        return false;
    }

    ggml_backend_sched_reset(sched);

    return true;
}
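
// Example (sketch): reserving buffers once with a worst-case graph so that later calls to
// ggml_backend_sched_graph_compute() do not need to grow the allocator. `build_worst_case_graph`
// is a placeholder for application code that builds the largest graph it will ever submit.
//
//   struct ggml_cgraph * measure = build_worst_case_graph(ctx);
//   if (!ggml_backend_sched_reserve(sched, measure)) {
//       // handle reservation failure
//   }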
bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs);

    ggml_backend_sched_split_graph(sched, graph);

    if (!ggml_backend_sched_alloc_splits(sched)) {
        return false;
    }

    sched->is_alloc = true;

    return true;
}

enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph);
    ggml_backend_sched_synchronize(sched);
    return err;
}

enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    if (!sched->is_reset && !sched->is_alloc) {
        ggml_backend_sched_reset(sched);
    }

    if (!sched->is_alloc) {
        if (!ggml_backend_sched_alloc_graph(sched, graph)) {
            return GGML_STATUS_ALLOC_FAILED;
        }
    }

    return ggml_backend_sched_compute_splits(sched);
}

void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) {
    for (int i = 0; i < sched->n_backends; i++) {
        ggml_backend_synchronize(sched->backends[i]);
    }
}
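
// Example (sketch): the asynchronous variant returns before the backends have finished; call
// ggml_backend_sched_synchronize() before reading any results. `gf`, `result` and `out_data`
// are placeholders for an application-built graph, one of its output tensors, and a host buffer.
//
//   enum ggml_status st = ggml_backend_sched_graph_compute_async(sched, gf);
//   if (st == GGML_STATUS_SUCCESS) {
//       ggml_backend_sched_synchronize(sched);
//       ggml_backend_tensor_get(result, out_data, 0, ggml_nbytes(result));
//   }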
void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
    sched->callback_eval = callback;
    sched->callback_eval_user_data = user_data;
}
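
// Example (sketch): an eval callback as driven by ggml_backend_sched_compute_splits() above.
// It is called with ask == true to query whether the node's data is needed (which limits how many
// nodes are batched together), and again with ask == false after the node has been computed;
// returning false there stops evaluation of the current split. The "probe_" prefix is only an
// illustrative naming convention.
//
//   static bool log_node_cb(struct ggml_tensor * t, bool ask, void * user_data) {
//       if (ask) {
//           return strncmp(t->name, "probe_", 6) == 0; // only stop at tensors we care about
//       }
//       fprintf(stderr, "computed %s\n", t->name);
//       return true; // continue evaluation
//   }
//
//   ggml_backend_sched_set_eval_callback(sched, log_node_cb, NULL);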
int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
    return sched->n_splits;
}

int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) {
    return sched->n_copies;
}

int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) {
    return sched->n_backends;
}

ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) {
    GGML_ASSERT(i >= 0 && i < sched->n_backends);
    return sched->backends[i];
}

size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) {
    int backend_index = ggml_backend_sched_backend_id(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);

    return ggml_gallocr_get_buffer_size(sched->galloc, backend_index);
}

void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
    int backend_index = ggml_backend_sched_backend_id(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
    tensor_backend_id(node) = backend_index;
    SET_CAUSE(node, "usr");
    sched->is_reset = false;
}

ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
    int backend_index = tensor_backend_id(node);
    if (backend_index == -1) {
        return NULL;
    }
    return sched->backends[backend_index];
}
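
// Example (sketch): manually pinning a node to a specific backend before allocation; this overrides
// the automatic assignment done during graph splitting and records the cause as "usr". It must
// happen after the graph is built but before ggml_backend_sched_alloc_graph()/graph_compute().
// `some_node` and `backend_cpu` are placeholders supplied by the application.
//
//   ggml_backend_sched_set_tensor_backend(sched, some_node, backend_cpu);
//   assert(ggml_backend_sched_get_tensor_backend(sched, some_node) == backend_cpu);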
// utils

void ggml_backend_view_init(struct ggml_tensor * tensor) {
    GGML_ASSERT(tensor->buffer == NULL);
    GGML_ASSERT(tensor->view_src != NULL);
    GGML_ASSERT(tensor->view_src->buffer != NULL);
    GGML_ASSERT(tensor->view_src->data != NULL);

    tensor->buffer = tensor->view_src->buffer;
    tensor->data = (char *)tensor->view_src->data + tensor->view_offs;
    ggml_backend_buffer_init_tensor(tensor->buffer, tensor);
}

void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) {
    GGML_ASSERT(tensor->buffer == NULL);
    GGML_ASSERT(tensor->data == NULL);
    GGML_ASSERT(tensor->view_src == NULL);
    GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer));
    GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <=
                (char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer));

    tensor->buffer = buffer;
    tensor->data = addr;
    ggml_backend_buffer_init_tensor(buffer, tensor);
}
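
// Example (sketch): placing a tensor manually inside an existing buffer, within the constraints
// asserted above: the tensor must not be allocated yet and the address must lie inside the buffer.
// `buffer` is any previously created backend buffer and `t` an unallocated (no_alloc) tensor.
//
//   void * base = ggml_backend_buffer_get_base(buffer);
//   ggml_backend_tensor_alloc(buffer, t, base); // place t at the start of the buffer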
static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies,
    struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) {

    GGML_ASSERT(src != NULL);
    GGML_ASSERT(src->data && "graph must be allocated");

    size_t id = ggml_hash_insert(&hash_set, src);
    if (id == GGML_HASHSET_ALREADY_EXISTS) {
        return node_copies[ggml_hash_find(&hash_set, src)];
    }

    struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src);
    if (src->view_src != NULL) {
        dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
        dst->view_offs = src->view_offs;
    }
    dst->op = src->op;
    memcpy(dst->op_params, src->op_params, sizeof(dst->op_params));
    ggml_set_name(dst, src->name);

    // copy src
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        struct ggml_tensor * s = src->src[i];
        if (s == NULL) {
            continue;
        }
        dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
    }

    node_copies[id] = dst;
    return dst;
}

static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) {
    size_t id = ggml_hash_find(hash_set, src);
    if (node_init[id]) {
        return;
    }
    node_init[id] = true;

    struct ggml_tensor * dst = node_copies[id];
    if (dst->view_src != NULL) {
        graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src);
        ggml_backend_view_init(dst);
    }
    else {
        ggml_backend_tensor_copy(src, dst);
    }

    // init src
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        struct ggml_tensor * s = src->src[i];
        if (s == NULL) {
            continue;
        }
        graph_copy_init_tensor(hash_set, node_copies, node_init, s);
    }
}
struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) {
    struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size);
    struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT
    bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0]));

    struct ggml_init_params params = {
        /* .mem_size   = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true
    };

    struct ggml_context * ctx_allocated = ggml_init(params);
    struct ggml_context * ctx_unallocated = ggml_init(params);

    if (ctx_allocated == NULL || ctx_unallocated == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__);
        ggml_hash_set_free(&hash_set);
        free(node_copies);
        free(node_init);
        ggml_free(ctx_allocated);
        ggml_free(ctx_unallocated);
        return {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
    }

    // dup nodes
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
    }

    // allocate nodes
    ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
    if (buffer == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__);
        ggml_hash_set_free(&hash_set);
        free(node_copies);
        free(node_init);
        ggml_free(ctx_allocated);
        ggml_free(ctx_unallocated);
        return {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
    }

    //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);

    // copy data and init views
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        graph_copy_init_tensor(&hash_set, node_copies, node_init, node);
    }

    // build graph copy
    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false);
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)];
        graph_copy->nodes[i] = node_copy;
    }
    graph_copy->n_nodes = graph->n_nodes;

    ggml_hash_set_free(&hash_set);
    free(node_copies);
    free(node_init);

    return {
        /* .buffer          = */ buffer,
        /* .ctx_allocated   = */ ctx_allocated,
        /* .ctx_unallocated = */ ctx_unallocated,
        /* .graph           = */ graph_copy,
    };
}

void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) {
    ggml_backend_buffer_free(copy.buffer);
    ggml_free(copy.ctx_allocated);
    ggml_free(copy.ctx_unallocated);
}
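
// Example (sketch): duplicating an allocated graph onto another backend and releasing the copy.
// All fields of the returned struct are owned together and must be freed via
// ggml_backend_graph_copy_free(). `gf` and `backend_cpu` are placeholders.
//
//   struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend_cpu, gf);
//   if (copy.buffer != NULL) {
//       ggml_backend_graph_compute(backend_cpu, copy.graph);
//       ggml_backend_graph_copy_free(copy);
//   }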
bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
    struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
    if (copy.buffer == NULL) {
        return false;
    }

    struct ggml_cgraph * g1 = graph;
    struct ggml_cgraph * g2 = copy.graph;

    assert(g1->n_nodes == g2->n_nodes);

    for (int i = 0; i < g1->n_nodes; i++) {
        //printf("eval %d/%d\n", i, g1->n_nodes);
        struct ggml_tensor * t1 = g1->nodes[i];
        struct ggml_tensor * t2 = g2->nodes[i];

        assert(t1->op == t2->op && ggml_are_same_layout(t1, t2));

        struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1);
        struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1);

        ggml_backend_graph_compute(backend1, &g1v);
        ggml_backend_graph_compute(backend2, &g2v);

        if (ggml_is_view_op(t1->op)) {
            continue;
        }

        // compare results, calculate rms etc
        if (!callback(i, t1, t2, user_data)) {
            break;
        }
    }

    ggml_backend_graph_copy_free(copy);

    return true;
}
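
// Example (sketch): a comparison callback for ggml_backend_compare_graph_backend(). It receives the
// node index and the two tensors computed by backend1 and backend2; returning false stops the
// comparison early. The F32 assumption and the tolerance below are illustrative only.
//
//   static bool cmp_cb(int i, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
//       std::vector<float> a(ggml_nelements(t1)), b(ggml_nelements(t2));
//       ggml_backend_tensor_get(t1, a.data(), 0, ggml_nbytes(t1));
//       ggml_backend_tensor_get(t2, b.data(), 0, ggml_nbytes(t2));
//       double max_err = 0.0;
//       for (size_t k = 0; k < a.size(); k++) {
//           max_err = std::max(max_err, (double) fabsf(a[k] - b[k]));
//       }
//       fprintf(stderr, "node %d (%s): max abs err %g\n", i, t1->name, max_err);
//       return max_err < 1e-3; // stop on large mismatch
//   }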
// CPU backend - buffer

static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
    uintptr_t data = (uintptr_t)buffer->context;

    // align the buffer
    if (data % TENSOR_ALIGNMENT != 0) {
        data = GGML_PAD(data, TENSOR_ALIGNMENT);
    }

    return (void *)data;
}

static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_aligned_free(buffer->context, buffer->size);
}

static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    memset((char *)tensor->data + offset, value, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    GGML_UNUSED(buffer);
}

static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
    if (ggml_backend_buffer_is_host(src->buffer)) {
        memcpy(dst->data, src->data, ggml_nbytes(src));
        return true;
    }
    return false;

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    memset(buffer->context, value, buffer->size);
}

static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = {
    /* .free_buffer   = */ ggml_backend_cpu_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};

static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = {
    /* .free_buffer   = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};
// CPU backend buffer type
// this buffer type is defined here to make it available to all backends

static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU";

    GGML_UNUSED(buft);
}

static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    void * data = ggml_aligned_malloc(size);

    if (data == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size);
        return NULL;
    }

    return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size);
}

static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return TENSOR_ALIGNMENT;

    GGML_UNUSED(buft);
}

static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    GGML_UNUSED(buft);
}

ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type;
}
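
// Example (sketch): allocating a standalone CPU buffer through the buffer type interface and
// releasing it again; ggml_backend_buft_alloc_buffer() dispatches to the .alloc_buffer member
// defined above. The size here is arbitrary.
//
//   ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), 16*1024*1024);
//   if (buf != NULL) {
//       ggml_backend_buffer_clear(buf, 0);
//       ggml_backend_buffer_free(buf);
//   }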
static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU_Mapped";

    GGML_UNUSED(buft);
}

static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_buffer_from_ptr_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type;
}
ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
    GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned");
    return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size);
}
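
// Example (sketch): wrapping memory the application already owns. The pointer must be aligned to
// TENSOR_ALIGNMENT (see the assert above) and stays owned by the caller, since the buffer
// interface used here has no free_buffer. Any allocator that guarantees the alignment will do.
//
//   size_t sz = 8*1024*1024;                           // multiple of the alignment
//   void * mem = aligned_alloc(TENSOR_ALIGNMENT, sz);
//   ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(mem, sz);
//   ...
//   ggml_backend_buffer_free(buf); // frees the buffer object only, not `mem`
//   free(mem);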