ggml-backend.cpp

  1. // Note: porting this file to C++ is a work in progress
  2. #ifdef _WIN32
  3. #define WIN32_LEAN_AND_MEAN
  4. #ifndef NOMINMAX
  5. # define NOMINMAX
  6. #endif
  7. #include <windows.h>
  8. #endif
  9. #include "ggml-backend.h"
  10. #include "ggml-backend-impl.h"
  11. #include "ggml-alloc.h"
  12. #include "ggml-impl.h"
  13. #include <assert.h>
  14. #include <limits.h>
  15. #include <stdarg.h>
  16. #include <stdio.h>
  17. #include <stdlib.h>
  18. #include <string.h>
  19. #include <string>
  20. #include <vector>
  21. #include <algorithm>
  22. #ifdef __APPLE__
  23. #include <sys/types.h>
  24. #include <sys/sysctl.h>
  25. #endif
  26. // backend buffer type
  27. const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
  28. return buft->iface.get_name(buft);
  29. }
  30. ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
  31. if (size == 0) {
  32. // return a dummy buffer for zero-sized allocations
  33. return ggml_backend_buffer_init(buft, {}, NULL, 0);
  34. }
  35. return buft->iface.alloc_buffer(buft, size);
  36. }
  37. size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
  38. return buft->iface.get_alignment(buft);
  39. }
  40. size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
  41. // get_max_size is optional, defaults to SIZE_MAX
  42. if (buft->iface.get_max_size) {
  43. return buft->iface.get_max_size(buft);
  44. }
  45. return SIZE_MAX;
  46. }
  47. size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
  48. // get_alloc_size is optional, defaults to ggml_nbytes
  49. if (buft->iface.get_alloc_size) {
  50. size_t size = buft->iface.get_alloc_size(buft, tensor);
  51. assert(size >= ggml_nbytes(tensor));
  52. return size;
  53. }
  54. return ggml_nbytes(tensor);
  55. }
  56. bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
  57. if (buft->iface.is_host) {
  58. return buft->iface.is_host(buft);
  59. }
  60. return false;
  61. }
  62. ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) {
  63. return buft->device;
  64. }
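  // Illustrative usage (a sketch, not part of this file): a caller typically queries the
  // alignment and maximum buffer size of a buffer type before allocating from it, e.g.
  //
  //   size_t align  = ggml_backend_buft_get_alignment(buft);
  //   size_t max    = ggml_backend_buft_get_max_size(buft);
  //   size_t padded = (nbytes + align - 1) / align * align;  // round the request up to the alignment
  //   ggml_backend_buffer_t buf = padded <= max ? ggml_backend_buft_alloc_buffer(buft, padded) : NULL;
  //   // ... use the buffer ...
  //   ggml_backend_buffer_free(buf);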
  65. // backend buffer
  66. ggml_backend_buffer_t ggml_backend_buffer_init(
  67. ggml_backend_buffer_type_t buft,
  68. struct ggml_backend_buffer_i iface,
  69. void * context,
  70. size_t size) {
  71. ggml_backend_buffer_t buffer = new ggml_backend_buffer {
  72. /* .interface = */ iface,
  73. /* .buft = */ buft,
  74. /* .context = */ context,
  75. /* .size = */ size,
  76. /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY
  77. };
  78. return buffer;
  79. }
  80. const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
  81. return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer));
  82. }
  83. void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
  84. if (buffer == NULL) {
  85. return;
  86. }
  87. if (buffer->iface.free_buffer != NULL) {
  88. buffer->iface.free_buffer(buffer);
  89. }
  90. delete buffer;
  91. }
  92. size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
  93. return buffer->size;
  94. }
  95. void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
  96. // get_base is optional if the buffer is zero-sized
  97. if (buffer->size == 0) {
  98. return NULL;
  99. }
  100. void * base = buffer->iface.get_base(buffer);
  101. GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");
  102. return base;
  103. }
  104. enum ggml_status ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
  105. // init_tensor is optional
  106. if (buffer->iface.init_tensor) {
  107. return buffer->iface.init_tensor(buffer, tensor);
  108. }
  109. return GGML_STATUS_SUCCESS;
  110. }
  111. void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
  112. // clear is optional if the buffer is zero-sized
  113. if (buffer->size == 0) {
  114. return;
  115. }
  116. buffer->iface.clear(buffer, value);
  117. }
  118. size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
  119. return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
  120. }
  121. size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) {
  122. return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer));
  123. }
  124. size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
  125. return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
  126. }
  127. bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
  128. return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
  129. }
  130. void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
  131. buffer->usage = usage;
  132. // FIXME: add a generic callback to the buffer interface
  133. if (ggml_backend_buffer_is_multi_buffer(buffer)) {
  134. ggml_backend_multi_buffer_set_usage(buffer, usage);
  135. }
  136. }
  137. enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) {
  138. return buffer->usage;
  139. }
  140. ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
  141. return buffer->buft;
  142. }
  143. void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
  144. if (buffer->iface.reset) {
  145. buffer->iface.reset(buffer);
  146. }
  147. }
  148. bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) {
  149. ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
  150. if (dst_buf->iface.cpy_tensor) {
  151. return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
  152. }
  153. return false;
  154. }
  155. // backend
  156. ggml_guid_t ggml_backend_guid(ggml_backend_t backend) {
  157. if (backend == NULL) {
  158. return NULL;
  159. }
  160. return backend->guid;
  161. }
  162. const char * ggml_backend_name(ggml_backend_t backend) {
  163. if (backend == NULL) {
  164. return "NULL";
  165. }
  166. return backend->iface.get_name(backend);
  167. }
  168. void ggml_backend_free(ggml_backend_t backend) {
  169. if (backend == NULL) {
  170. return;
  171. }
  172. backend->iface.free(backend);
  173. }
  174. ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) {
  175. return ggml_backend_dev_buffer_type(backend->device);
  176. }
  177. ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
  178. return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size);
  179. }
  180. size_t ggml_backend_get_alignment(ggml_backend_t backend) {
  181. return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend));
  182. }
  183. size_t ggml_backend_get_max_size(ggml_backend_t backend) {
  184. return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend));
  185. }
  186. void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
  187. GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
  188. GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
  189. if (backend->iface.set_tensor_async == NULL) {
  190. ggml_backend_tensor_set(tensor, data, offset, size);
  191. } else {
  192. backend->iface.set_tensor_async(backend, tensor, data, offset, size);
  193. }
  194. }
  195. void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
  196. GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
  197. GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
  198. if (backend->iface.get_tensor_async == NULL) {
  199. ggml_backend_tensor_get(tensor, data, offset, size);
  200. } else {
  201. backend->iface.get_tensor_async(backend, tensor, data, offset, size);
  202. }
  203. }
  204. void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
  205. GGML_ASSERT(tensor);
  206. ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
  207. if (size == 0) {
  208. return;
  209. }
  210. GGML_ASSERT(buf != NULL && "tensor buffer not set");
  211. GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
  212. GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
  213. buf->iface.set_tensor(buf, tensor, data, offset, size);
  214. }
  215. void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
  216. GGML_ASSERT(tensor);
  217. ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
  218. if (size == 0) {
  219. return;
  220. }
  221. GGML_ASSERT(buf != NULL && "tensor buffer not set");
  222. GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
  223. GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
  224. buf->iface.get_tensor(buf, tensor, data, offset, size);
  225. }
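  // Illustrative usage (a sketch): staging a whole tensor through host memory with the
  // synchronous accessors above; offset and size are in bytes relative to the tensor data.
  //
  //   std::vector<uint8_t> host(ggml_nbytes(t));
  //   ggml_backend_tensor_get(t, host.data(), 0, host.size()); // device -> host
  //   ggml_backend_tensor_set(t, host.data(), 0, host.size()); // host -> device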
  226. void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
  227. ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
  228. if (size == 0) {
  229. return;
  230. }
  231. GGML_ASSERT(buf != NULL && "tensor buffer not set");
  232. GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
  233. GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
  234. GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer");
  235. buf->iface.memset_tensor(buf, tensor, value, offset, size);
  236. }
  237. void ggml_backend_synchronize(ggml_backend_t backend) {
  238. if (backend->iface.synchronize == NULL) {
  239. return;
  240. }
  241. backend->iface.synchronize(backend);
  242. }
  243. ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
  244. GGML_ASSERT(backend->iface.graph_plan_create != NULL);
  245. return backend->iface.graph_plan_create(backend, cgraph);
  246. }
  247. void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
  248. GGML_ASSERT(backend->iface.graph_plan_free != NULL);
  249. backend->iface.graph_plan_free(backend, plan);
  250. }
  251. enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
  252. GGML_ASSERT(backend->iface.graph_plan_compute != NULL);
  253. return backend->iface.graph_plan_compute(backend, plan);
  254. }
  255. enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
  256. enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph);
  257. ggml_backend_synchronize(backend);
  258. return err;
  259. }
  260. enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
  261. return backend->iface.graph_compute(backend, cgraph);
  262. }
  263. bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
  264. return ggml_backend_dev_supports_op(backend->device, op);
  265. }
  266. bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
  267. return ggml_backend_dev_supports_buft(backend->device, buft);
  268. }
  269. bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) {
  270. return ggml_backend_dev_offload_op(backend->device, op);
  271. }
  272. ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) {
  273. return backend->device;
  274. }
  275. // backend copy
  276. static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
  277. if (a->type != b->type) {
  278. return false;
  279. }
  280. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  281. if (a->ne[i] != b->ne[i]) {
  282. return false;
  283. }
  284. if (a->nb[i] != b->nb[i]) {
  285. return false;
  286. }
  287. }
  288. return true;
  289. }
  290. void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
  291. GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
  292. if (src == dst) {
  293. return;
  294. }
  295. if (ggml_backend_buffer_is_host(src->buffer)) {
  296. ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
  297. } else if (ggml_backend_buffer_is_host(dst->buffer)) {
  298. ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
  299. } else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
  300. #ifndef NDEBUG
  301. GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
  302. #endif
  303. size_t nbytes = ggml_nbytes(src);
  304. void * data = malloc(nbytes);
  305. ggml_backend_tensor_get(src, data, 0, nbytes);
  306. ggml_backend_tensor_set(dst, data, 0, nbytes);
  307. free(data);
  308. }
  309. }
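  // note: the last branch above stages the data through a temporary host allocation, which is the
  // slowest path; it is only taken when neither buffer is in host memory and the destination
  // buffer does not implement cpy_tensor for this source buffer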
  310. void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) {
  311. GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
  312. if (src == dst) {
  313. return;
  314. }
  315. if (backend_dst->iface.cpy_tensor_async != NULL) {
  316. if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) {
  317. return;
  318. }
  319. }
  320. // an async copy would normally happen after all the queued operations on both backends are completed
  321. // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy
  322. ggml_backend_synchronize(backend_src);
  323. ggml_backend_synchronize(backend_dst);
  324. ggml_backend_tensor_copy(src, dst);
  325. }
  326. // events
  327. ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) {
  328. // null device is allowed for the transition period to the device interface
  329. if (device == NULL || device->iface.event_new == NULL) {
  330. return NULL;
  331. }
  332. return device->iface.event_new(device);
  333. }
  334. void ggml_backend_event_free(ggml_backend_event_t event) {
  335. if (event == NULL) {
  336. return;
  337. }
  338. event->device->iface.event_free(event->device, event);
  339. }
  340. void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) {
  341. GGML_ASSERT(backend->iface.event_record != NULL);
  342. backend->iface.event_record(backend, event);
  343. }
  344. void ggml_backend_event_synchronize(ggml_backend_event_t event) {
  345. GGML_ASSERT(event->device->iface.event_synchronize);
  346. event->device->iface.event_synchronize(event->device, event);
  347. }
  348. void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) {
  349. GGML_ASSERT(backend->iface.event_wait != NULL);
  350. backend->iface.event_wait(backend, event);
  351. }
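  // Illustrative usage (a sketch): events order work between two backends without a full
  // synchronize, which is how the scheduler below overlaps compute and input copies, e.g.
  //
  //   ggml_backend_event_record(event, backend_src); // mark the end of the work queued so far on src
  //   ggml_backend_event_wait(backend_dst, event);   // backend_dst waits for that point before continuing
  //   ggml_backend_event_synchronize(event);         // or: block the host until the event has completed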
  352. // Backend device
  353. const char * ggml_backend_dev_name(ggml_backend_dev_t device) {
  354. return device->iface.get_name(device);
  355. }
  356. const char * ggml_backend_dev_description(ggml_backend_dev_t device) {
  357. return device->iface.get_description(device);
  358. }
  359. void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
  360. device->iface.get_memory(device, free, total);
  361. }
  362. enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) {
  363. return device->iface.get_type(device);
  364. }
  365. void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) {
  366. memset(props, 0, sizeof(*props));
  367. device->iface.get_props(device, props);
  368. }
  369. ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) {
  370. return device->reg;
  371. }
  372. ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) {
  373. return device->iface.init_backend(device, params);
  374. }
  375. ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) {
  376. return device->iface.get_buffer_type(device);
  377. }
  378. ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) {
  379. if (device->iface.get_host_buffer_type == NULL) {
  380. return NULL;
  381. }
  382. return device->iface.get_host_buffer_type(device);
  383. }
  384. ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) {
  385. return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size);
  386. }
  387. bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
  388. return device->iface.supports_op(device, op);
  389. }
  390. bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) {
  391. return device->iface.supports_buft(device, buft);
  392. }
  393. bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
  394. if (device->iface.offload_op != NULL) {
  395. return device->iface.offload_op(device, op);
  396. }
  397. return false;
  398. }
  399. // Backend (reg)
  400. const char * ggml_backend_reg_name(ggml_backend_reg_t reg) {
  401. return reg->iface.get_name(reg);
  402. }
  403. size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) {
  404. return reg->iface.get_device_count(reg);
  405. }
  406. ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) {
  407. return reg->iface.get_device(reg, index);
  408. }
  409. void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
  410. if (!reg->iface.get_proc_address) {
  411. return NULL;
  412. }
  413. return reg->iface.get_proc_address(reg, name);
  414. }
  415. // multi-buffer buffer
  416. struct ggml_backend_multi_buffer_context {
  417. ggml_backend_buffer_t * buffers;
  418. size_t n_buffers;
  419. };
  420. static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) {
  421. ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
  422. for (size_t i = 0; i < ctx->n_buffers; i++) {
  423. ggml_backend_buffer_free(ctx->buffers[i]);
  424. }
  425. free(ctx->buffers);
  426. free(ctx);
  427. }
  428. static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
  429. ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
  430. for (size_t i = 0; i < ctx->n_buffers; i++) {
  431. ggml_backend_buffer_clear(ctx->buffers[i], value);
  432. }
  433. }
  434. static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = {
  435. /* .free_buffer = */ ggml_backend_multi_buffer_free_buffer,
  436. /* .get_base = */ NULL,
  437. /* .init_tensor = */ NULL,
  438. /* .memset_tensor = */ NULL,
  439. /* .set_tensor = */ NULL,
  440. /* .get_tensor = */ NULL,
  441. /* .cpy_tensor = */ NULL,
  442. /* .clear = */ ggml_backend_multi_buffer_clear,
  443. /* .reset = */ NULL,
  444. };
  445. ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) {
  446. ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context));
  447. ctx->n_buffers = n_buffers;
  448. ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));
  449. GGML_ASSERT(ctx->buffers != NULL);
  450. size_t total_size = 0;
  451. for (size_t i = 0; i < n_buffers; i++) {
  452. ctx->buffers[i] = buffers[i];
  453. total_size += ggml_backend_buffer_get_size(buffers[i]);
  454. }
  455. return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size);
  456. }
  457. bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) {
  458. return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer;
  459. }
  460. void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
  461. GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer));
  462. ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
  463. for (size_t i = 0; i < ctx->n_buffers; i++) {
  464. ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
  465. }
  466. }
  467. // creates a copy of the tensor with the same memory layout
  468. static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
  469. struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
  470. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  471. dup->nb[i] = tensor->nb[i];
  472. }
  473. return dup;
  474. }
  475. static bool ggml_is_view_op(enum ggml_op op) {
  476. return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
  477. }
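  // note: view ops do not own data (they alias their source), which is why the scheduler below
  // skips them when assigning backends and when deciding where a split starts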
  478. // scheduler
  479. #ifndef GGML_SCHED_MAX_BACKENDS
  480. #define GGML_SCHED_MAX_BACKENDS 16
  481. #endif
  482. #ifndef GGML_SCHED_MAX_SPLIT_INPUTS
  483. #define GGML_SCHED_MAX_SPLIT_INPUTS GGML_MAX_SRC
  484. #endif
  485. #ifndef GGML_SCHED_MAX_COPIES
  486. #define GGML_SCHED_MAX_COPIES 4
  487. #endif
  488. struct ggml_backend_sched_split {
  489. int backend_id;
  490. int i_start;
  491. int i_end;
  492. struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
  493. int n_inputs;
  494. // graph view of this split
  495. struct ggml_cgraph graph;
  496. };
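  // note: a split is a contiguous range of graph nodes [i_start, i_end) executed on a single
  // backend; inputs[] holds the tensors that live on other backends (or come from the user) and
  // must be copied into this split's backend before it runs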
  497. struct ggml_backend_sched {
  498. bool is_reset; // true if the scheduler has been reset since the last graph split
  499. bool is_alloc;
  500. int n_backends;
  501. ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS];
  502. ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS];
  503. ggml_gallocr_t galloc;
  504. // hash map of the nodes in the graph
  505. struct ggml_hash_set hash_set;
  506. int * hv_tensor_backend_ids; // [hash_set.size]
  507. struct ggml_tensor ** hv_tensor_copies; // [hash_set.size][n_backends][n_copies]
  508. int * node_backend_ids; // [graph_size]
  509. int * leaf_backend_ids; // [graph_size]
  510. int * prev_node_backend_ids; // [graph_size]
  511. int * prev_leaf_backend_ids; // [graph_size]
  512. // copy of the graph with modified inputs
  513. struct ggml_cgraph graph;
  514. // graph splits
  515. struct ggml_backend_sched_split * splits;
  516. int n_splits;
  517. int splits_capacity;
  518. // pipeline parallelism support
  519. int n_copies;
  520. int cur_copy;
  521. ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES];
  522. struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
  523. int n_graph_inputs;
  524. struct ggml_context * ctx;
  525. ggml_backend_sched_eval_callback callback_eval;
  526. void * callback_eval_user_data;
  527. char * context_buffer;
  528. size_t context_buffer_size;
  529. int debug;
  530. };
  531. #define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
  532. #define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
  533. #define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
  534. #define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id)
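  // note: hv_tensor_copies is a flat array indexed as a 3D table
  // [hash_set.size][n_backends][n_copies]; tensor_id_copy() computes the flattened index
  //   id * n_backends * n_copies + backend_id * n_copies + copy_id
  // so that each (tensor, backend, pipeline copy) triple maps to one cached copy of the tensor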
  535. // returns the priority of the backend, lower id is higher priority
  536. static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) {
  537. for (int i = 0; i < sched->n_backends; i++) {
  538. if (sched->backends[i] == backend) {
  539. return i;
  540. }
  541. }
  542. return -1;
  543. }
  544. static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) {
  545. ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
  546. if (buffer == NULL) {
  547. return -1;
  548. }
  549. // find highest prio backend that supports the buffer type and the op
  550. for (int i = 0; i < sched->n_backends; i++) {
  551. if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) &&
  552. ggml_backend_supports_op(sched->backends[i], op)) {
  553. return i;
  554. }
  555. }
  556. #ifndef NDEBUG
  557. GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
  558. __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name);
  559. #endif
  560. return -1;
  561. }
  562. #if 0
  563. #define GGML_SCHED_MAX_SPLITS_DEBUG 4096
  564. static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only
  565. #define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
  566. #define GET_CAUSE(node) causes[hash_id(node)]
  567. #else
  568. #define SET_CAUSE(node, ...)
  569. #define GET_CAUSE(node) ""
  570. #endif
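  // note: flipping the "#if 0" above to "#if 1" records, for every node, a short string describing
  // why the scheduler picked its backend ("1.dst", "2.sup", "3.upg", ...); GET_CAUSE is printed by
  // ggml_backend_sched_print_assignments at debug level > 1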
  571. // returns the backend that should be used for the node based on the current locations
  572. static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) {
  573. // assign pre-allocated nodes to their backend
  574. int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor);
  575. if (cur_backend_id != -1) {
  576. SET_CAUSE(tensor, "1.dst");
  577. return cur_backend_id;
  578. }
  579. // view_src
  580. if (tensor->view_src != NULL) {
  581. cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor);
  582. if (cur_backend_id != -1) {
  583. SET_CAUSE(tensor, "1.vsrc");
  584. return cur_backend_id;
  585. }
  586. }
  587. if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) {
  588. // since the tensor is pre-allocated, it cannot be moved to another backend
  589. ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
  590. GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op));
  591. }
  592. // graph input
  593. if (tensor->flags & GGML_TENSOR_FLAG_INPUT) {
  594. cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU)
  595. SET_CAUSE(tensor, "1.inp");
  596. return cur_backend_id;
  597. }
  598. // operations with weights are preferably run on the same backend as the weights
  599. for (int i = 0; i < GGML_MAX_SRC; i++) {
  600. const struct ggml_tensor * src = tensor->src[i];
  601. if (src == NULL) {
  602. continue;
  603. }
  604. // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
  605. // not an ideal solution
  606. if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
  607. int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
  608. // check if a backend with higher prio wants to offload the op
  609. if (src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
  610. for (int b = 0; b < src_backend_id; b++) {
  611. if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
  612. SET_CAUSE(tensor, "1.off");
  613. return b;
  614. }
  615. }
  616. }
  617. SET_CAUSE(tensor, "1.wgt%d", i);
  618. return src_backend_id;
  619. }
  620. }
  621. return -1;
  622. }
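  // note: fmt_size returns a pointer to a single static buffer, so the result is only valid until
  // the next call and it is not thread-safe; it is only used for the debug logging below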
  623. static char * fmt_size(size_t size) {
  624. static char buffer[128];
  625. if (size >= 1024*1024) {
  626. snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024);
  627. } else {
  628. snprintf(buffer, sizeof(buffer), "%zuK", size/1024);
  629. }
  630. return buffer;
  631. }
  632. static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
  633. int cur_split = 0;
  634. for (int i = 0; i < graph->n_nodes; i++) {
  635. if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
  636. ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
  637. GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend),
  638. sched->splits[cur_split].n_inputs);
  639. for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
  640. if (j == 0) {
  641. GGML_LOG_DEBUG(": ");
  642. }
  643. GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
  644. fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
  645. }
  646. GGML_LOG_DEBUG("\n");
  647. cur_split++;
  648. }
  649. struct ggml_tensor * node = graph->nodes[i];
  650. if (ggml_is_view_op(node->op)) {
  651. continue;
  652. }
  653. if (sched->debug > 1) {
  654. ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node);
  655. GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
  656. fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node));
  657. for (int j = 0; j < GGML_MAX_SRC; j++) {
  658. struct ggml_tensor * src = node->src[j];
  659. if (src == NULL) {
  660. continue;
  661. }
  662. ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src);
  663. GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
  664. fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
  665. }
  666. GGML_LOG_DEBUG("\n");
  667. }
  668. }
  669. }
  670. static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) {
  671. ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer;
  672. ggml_backend_buffer_type_t buft = NULL;
  673. if (buf) {
  674. // the tensor is already allocated
  675. buft = buf->buft;
  676. } else {
  677. // see if the tensor already has a backend assigned, and use the buffer type of that backend
  678. int tensor_backend_id = tensor_backend_id(t);
  679. if (tensor_backend_id == -1 && t->view_src) {
  680. tensor_backend_id = tensor_backend_id(t->view_src);
  681. }
  682. if (tensor_backend_id != -1) {
  683. buft = sched->bufts[tensor_backend_id];
  684. }
  685. }
  686. return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft);
  687. }
  688. static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) {
  689. if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) {
  690. *node_backend_id = cur_backend_id;
  691. SET_CAUSE(node, "2.sup");
  692. }
  693. }
  694. // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
  695. static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
  696. // reset splits
  697. sched->n_splits = 0;
  698. sched->n_graph_inputs = 0;
  699. sched->is_reset = false;
  700. struct ggml_init_params params = {
  701. /* .mem_size = */ sched->context_buffer_size,
  702. /* .mem_buffer = */ sched->context_buffer,
  703. /* .no_alloc = */ true
  704. };
  705. ggml_free(sched->ctx);
  706. sched->ctx = ggml_init(params);
  707. if (sched->ctx == NULL) {
  708. GGML_ABORT("%s: failed to initialize context\n", __func__);
  709. }
  710. // pass 1: assign backends to ops with pre-allocated inputs
  711. for (int i = 0; i < graph->n_leafs; i++) {
  712. struct ggml_tensor * leaf = graph->leafs[i];
  713. int * leaf_backend_id = &tensor_backend_id(leaf);
  714. // do not overwrite user assignments
  715. if (*leaf_backend_id == -1) {
  716. *leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf);
  717. }
  718. }
  719. for (int i = 0; i < graph->n_nodes; i++) {
  720. struct ggml_tensor * node = graph->nodes[i];
  721. int * node_backend_id = &tensor_backend_id(node);
  722. // do not overwrite user assignments
  723. if (*node_backend_id == -1) {
  724. *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node);
  725. #if 0
  726. // src
  727. if (node->op == GGML_OP_NONE) {
  728. continue;
  729. }
  730. for (int j = 0; j < GGML_MAX_SRC; j++) {
  731. struct ggml_tensor * src = node->src[j];
  732. if (src == NULL) {
  733. continue;
  734. }
  735. int * src_backend_id = &tensor_backend_id(src);
  736. if (*src_backend_id == -1) {
  737. *src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src);
  738. }
  739. }
  740. #endif
  741. }
  742. }
  743. // pass 2: expand current backend assignments
  744. // assign the same backend to adjacent nodes
  745. // expand gpu backends (i.e. non-last-priority) up and down, ignoring cpu (the lowest-priority backend)
  746. // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
  747. // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later, when the locations of their inputs are known; see the example below
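  // illustrative example (a sketch, assuming every backend supports all ops and there are no user
  // assignments): with backends [GPU0, GPU1, CPU] and pass-1 assignments [GPU0, -, -, CPU, -, GPU1],
  // the downward pass assigns the two nodes after GPU0 to GPU0 and resets at the CPU node, leaving
  // the node before GPU1 unassigned; the upward pass then assigns that node to GPU1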
  748. // expand gpu down
  749. {
  750. int cur_backend_id = -1;
  751. for (int i = 0; i < graph->n_nodes; i++) {
  752. struct ggml_tensor * node = graph->nodes[i];
  753. if (ggml_is_view_op(node->op)) {
  754. continue;
  755. }
  756. int * node_backend_id = &tensor_backend_id(node);
  757. if (*node_backend_id != -1) {
  758. if (*node_backend_id == sched->n_backends - 1) {
  759. // skip cpu (lowest prio backend)
  760. cur_backend_id = -1;
  761. } else {
  762. cur_backend_id = *node_backend_id;
  763. }
  764. } else if (cur_backend_id != -1) {
  765. ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
  766. }
  767. }
  768. }
  769. // expand gpu up
  770. {
  771. int cur_backend_id = -1;
  772. for (int i = graph->n_nodes - 1; i >= 0; i--) {
  773. struct ggml_tensor * node = graph->nodes[i];
  774. if (ggml_is_view_op(node->op)) {
  775. continue;
  776. }
  777. int * node_backend_id = &tensor_backend_id(node);
  778. if (*node_backend_id != -1) {
  779. if (*node_backend_id == sched->n_backends - 1) {
  780. // skip cpu (lowest prio backend)
  781. cur_backend_id = -1;
  782. } else {
  783. cur_backend_id = *node_backend_id;
  784. }
  785. } else if (cur_backend_id != -1) {
  786. ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
  787. }
  788. }
  789. }
  790. // expand rest down
  791. {
  792. int cur_backend_id = -1;
  793. for (int i = 0; i < graph->n_nodes; i++) {
  794. struct ggml_tensor * node = graph->nodes[i];
  795. if (ggml_is_view_op(node->op)) {
  796. continue;
  797. }
  798. int * node_backend_id = &tensor_backend_id(node);
  799. if (*node_backend_id != -1) {
  800. cur_backend_id = *node_backend_id;
  801. } else if (cur_backend_id != -1) {
  802. ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
  803. }
  804. }
  805. }
  806. // expand rest up
  807. {
  808. int cur_backend_id = -1;
  809. for (int i = graph->n_nodes - 1; i >= 0; i--) {
  810. struct ggml_tensor * node = graph->nodes[i];
  811. if (ggml_is_view_op(node->op)) {
  812. continue;
  813. }
  814. int * node_backend_id = &tensor_backend_id(node);
  815. if (*node_backend_id != -1) {
  816. cur_backend_id = *node_backend_id;
  817. } else if (cur_backend_id != -1) {
  818. ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
  819. }
  820. }
  821. }
  822. // pass 3: upgrade nodes to higher prio backends with compatible buffer types
  823. // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there
  824. // however, we also need to verify that the sources are in compatible buffer types
  825. // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph
  826. // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same
  827. // this is not uncommon since multiple backends can use host memory, with the same buffer type (eg. BLAS and CPU)
  828. // additionally, set remaining unassigned nodes to the backend with the most supported inputs
  829. // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point
  830. for (int i = 0; i < graph->n_nodes; i++) {
  831. struct ggml_tensor * node = graph->nodes[i];
  832. if (ggml_is_view_op(node->op)) {
  833. continue;
  834. }
  835. int * node_backend_id = &tensor_backend_id(node);
  836. if (*node_backend_id == -1) {
  837. // unassigned node: find the backend with the most supported inputs
  838. int n_supported_best = -1;
  839. for (int b = 0; b < sched->n_backends; b++) {
  840. if (ggml_backend_supports_op(sched->backends[b], node)) {
  841. int n_supported = 0;
  842. for (int j = 0; j < GGML_MAX_SRC; j++) {
  843. struct ggml_tensor * src = node->src[j];
  844. if (src == NULL) {
  845. continue;
  846. }
  847. if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) {
  848. n_supported++;
  849. }
  850. }
  851. if (n_supported > n_supported_best) {
  852. n_supported_best = n_supported;
  853. *node_backend_id = b;
  854. SET_CAUSE(node, "3.best");
  855. }
  856. }
  857. }
  858. } else {
  859. // assigned node: upgrade to higher prio backend if possible
  860. for (int b = 0; b < *node_backend_id; b++) {
  861. if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) {
  862. bool supported = true;
  863. for (int j = 0; j < GGML_MAX_SRC; j++) {
  864. struct ggml_tensor * src = node->src[j];
  865. if (src == NULL) {
  866. continue;
  867. }
  868. if (!ggml_backend_sched_buffer_supported(sched, src, b)) {
  869. supported = false;
  870. break;
  871. }
  872. }
  873. if (supported) {
  874. *node_backend_id = b;
  875. SET_CAUSE(node, "3.upg");
  876. break;
  877. }
  878. }
  879. }
  880. }
  881. }
  882. // pass 4: assign backends to remaining src from dst and view_src
  883. for (int i = 0; i < graph->n_nodes; i++) {
  884. struct ggml_tensor * node = graph->nodes[i];
  885. int * cur_backend_id = &tensor_backend_id(node);
  886. if (node->view_src != NULL && *cur_backend_id == -1) {
  887. *cur_backend_id = tensor_backend_id(node->view_src);
  888. SET_CAUSE(node, "4.vsrc");
  889. }
  890. for (int j = 0; j < GGML_MAX_SRC; j++) {
  891. struct ggml_tensor * src = node->src[j];
  892. if (src == NULL) {
  893. continue;
  894. }
  895. int * src_backend_id = &tensor_backend_id(src);
  896. if (*src_backend_id == -1) {
  897. if (src->view_src != NULL) {
  898. // views are always on the same backend as the source
  899. *src_backend_id = tensor_backend_id(src->view_src);
  900. SET_CAUSE(src, "4.vsrc");
  901. } else {
  902. *src_backend_id = *cur_backend_id;
  903. SET_CAUSE(src, "4.cur");
  904. }
  905. }
  906. }
  907. }
  908. // pass 5: split graph, find tensors that need to be copied
  909. {
  910. int i_split = 0;
  911. struct ggml_backend_sched_split * split = &sched->splits[0];
  912. // find the backend of the first split, skipping view ops
  913. int i = 0;
  914. for (; i < graph->n_nodes; i++) {
  915. struct ggml_tensor * node = graph->nodes[i];
  916. if (!ggml_is_view_op(node->op)) {
  917. split->backend_id = tensor_backend_id(node);
  918. break;
  919. }
  920. }
  921. split->i_start = 0;
  922. split->n_inputs = 0;
  923. int cur_backend_id = split->backend_id;
  924. for (; i < graph->n_nodes; i++) {
  925. struct ggml_tensor * node = graph->nodes[i];
  926. if (ggml_is_view_op(node->op)) {
  927. continue;
  928. }
  929. const int node_backend_id = tensor_backend_id(node);
  930. assert(node_backend_id != -1); // all nodes should be assigned by now
  931. // check if we should start a new split based on the sources of the current node
  932. bool need_new_split = false;
  933. if (node_backend_id == cur_backend_id && split->n_inputs > 0) {
  934. for (int j = 0; j < GGML_MAX_SRC; j++) {
  935. struct ggml_tensor * src = node->src[j];
  936. if (src == NULL) {
  937. continue;
  938. }
  939. // check if a weight is on a different and incompatible backend
  940. // by starting a new split, the memory of the previously offloaded weights can be reused
  941. if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
  942. int src_backend_id = tensor_backend_id(src);
  943. if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
  944. need_new_split = true;
  945. break;
  946. }
  947. }
  948. // check if the split has too many inputs
  949. // FIXME: count the number of inputs instead of only checking when full
  950. if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) {
  951. const size_t id = hash_id(src);
  952. int src_backend_id = sched->hv_tensor_backend_ids[id];
  953. bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id);
  954. if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) {
  955. need_new_split = true;
  956. break;
  957. }
  958. }
  959. }
  960. }
  961. if (node_backend_id != cur_backend_id || need_new_split) {
  962. split->i_end = i;
  963. i_split++;
  964. if (i_split >= sched->splits_capacity) {
  965. sched->splits_capacity *= 2;
  966. sched->splits = (ggml_backend_sched_split *)
  967. realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split));
  968. GGML_ASSERT(sched->splits != NULL);
  969. }
  970. split = &sched->splits[i_split];
  971. split->backend_id = node_backend_id;
  972. split->i_start = i;
  973. split->n_inputs = 0;
  974. cur_backend_id = node_backend_id;
  975. }
  976. // find inputs that are not on the same backend
  977. for (int j = 0; j < GGML_MAX_SRC; j++) {
  978. struct ggml_tensor * src = node->src[j];
  979. if (src == NULL) {
  980. continue;
  981. }
  982. size_t src_id = hash_id(src);
  983. const int src_backend_id = sched->hv_tensor_backend_ids[src_id];
  984. assert(src_backend_id != -1); // all inputs should be assigned by now
  985. if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
  986. if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) {
  987. ggml_backend_t backend = sched->backends[src_backend_id];
  988. for (int c = 0; c < sched->n_copies; c++) {
  989. struct ggml_tensor * tensor_copy;
  990. if (c == sched->cur_copy) {
  991. tensor_copy = src; // use the original tensor as the current copy
  992. } else {
  993. tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
  994. ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
  995. }
  996. if (sched->n_copies > 1) {
  997. ggml_set_input(tensor_copy);
  998. ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
  999. }
  1000. tensor_id_copy(src_id, src_backend_id, c) = tensor_copy;
  1001. SET_CAUSE(tensor_copy, "4.cpy");
  1002. }
  1003. int n_graph_inputs = sched->n_graph_inputs++;
  1004. GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
  1005. sched->graph_inputs[n_graph_inputs] = src;
  1006. }
  1007. }
  1008. if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
  1009. // create a copy of the input in the split's backend
  1010. if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) {
  1011. ggml_backend_t backend = sched->backends[cur_backend_id];
  1012. for (int c = 0; c < sched->n_copies; c++) {
  1013. struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
  1014. ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
  1015. if (sched->n_copies > 1) {
  1016. ggml_set_input(tensor_copy);
  1017. ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
  1018. }
  1019. tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy;
  1020. SET_CAUSE(tensor_copy, "4.cpy");
  1021. }
  1022. int n_inputs = split->n_inputs++;
  1023. GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
  1024. split->inputs[n_inputs] = src;
  1025. }
  1026. node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy);
  1027. }
  1028. }
  1029. }
  1030. split->i_end = graph->n_nodes;
  1031. sched->n_splits = i_split + 1;
  1032. }
  1033. if (sched->debug) {
  1034. ggml_backend_sched_print_assignments(sched, graph);
  1035. }
  1036. // swap node_backend_ids and leaf_backend_ids with prevs
  1037. {
  1038. int * tmp = sched->node_backend_ids;
  1039. sched->node_backend_ids = sched->prev_node_backend_ids;
  1040. sched->prev_node_backend_ids = tmp;
  1041. tmp = sched->leaf_backend_ids;
  1042. sched->leaf_backend_ids = sched->prev_leaf_backend_ids;
  1043. sched->prev_leaf_backend_ids = tmp;
  1044. }
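  // note: each split input adds extra entries to the graph copy: a dependency view plus the input
  // copy as nodes and, with pipeline parallelism, one leaf per copy; the second term below is a
  // conservative upper bound for that overhead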
  1045. int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies;
  1046. if (sched->graph.size < graph_size) {
  1047. sched->graph.size = graph_size;
  1048. sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *));
  1049. sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *));
  1050. GGML_ASSERT(sched->graph.nodes != NULL);
  1051. GGML_ASSERT(sched->graph.leafs != NULL);
  1052. }
  1053. sched->graph.n_nodes = 0;
  1054. sched->graph.n_leafs = 0;
  1055. struct ggml_cgraph * graph_copy = &sched->graph;
  1056. for (int i = 0; i < sched->n_splits; i++) {
  1057. struct ggml_backend_sched_split * split = &sched->splits[i];
  1058. split->graph = ggml_graph_view(graph, split->i_start, split->i_end);
  1059. // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
  1060. for (int j = 0; j < split->n_inputs; j++) {
  1061. assert(graph_copy->size > (graph_copy->n_nodes + 1));
  1062. struct ggml_tensor * input = split->inputs[j];
  1063. const size_t input_id = hash_id(input);
  1064. struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy);
  1065. // add a dependency to the input source so that it is not freed before the copy is done
  1066. struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input);
  1067. input_dep->src[0] = input;
  1068. sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id];
  1069. graph_copy->nodes[graph_copy->n_nodes++] = input_dep;
  1070. // add a dependency to the input copy so that it is allocated at the start of the split
  1071. sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id;
  1072. graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
  1073. }
  1074. for (int j = split->i_start; j < split->i_end; j++) {
  1075. assert(graph_copy->size > graph_copy->n_nodes);
  1076. sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]);
  1077. graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
  1078. }
  1079. }
  1080. if (sched->n_copies > 1) {
  1081. // add input copies as leafs so that they are allocated first
  1082. for (int i = 0; i < sched->n_graph_inputs; i++) {
  1083. struct ggml_tensor * input = sched->graph_inputs[i];
  1084. size_t id = hash_id(input);
  1085. int backend_id = tensor_backend_id(input);
  1086. for (int c = 0; c < sched->n_copies; c++) {
  1087. struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
  1088. sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
  1089. assert(graph_copy->size > graph_copy->n_leafs);
  1090. graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
  1091. }
  1092. }
  1093. for (int i = 0; i < sched->n_splits; i++) {
  1094. struct ggml_backend_sched_split * split = &sched->splits[i];
  1095. int backend_id = split->backend_id;
  1096. for (int j = 0; j < split->n_inputs; j++) {
  1097. struct ggml_tensor * input = split->inputs[j];
  1098. size_t id = hash_id(input);
  1099. for (int c = 0; c < sched->n_copies; c++) {
  1100. struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
  1101. sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
  1102. assert(graph_copy->size > graph_copy->n_leafs);
  1103. graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
  1104. }
  1105. }
  1106. }
  1107. }
  1108. // add leafs from the original graph
  1109. for (int i = 0; i < graph->n_leafs; i++) {
  1110. struct ggml_tensor * leaf = graph->leafs[i];
  1111. sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf);
  1112. assert(graph_copy->size > graph_copy->n_leafs);
  1113. graph_copy->leafs[graph_copy->n_leafs++] = leaf;
  1114. }
  1115. }
static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
    bool backend_ids_changed = false;
    for (int i = 0; i < sched->graph.n_nodes; i++) {
        if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] &&
            sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) {
            backend_ids_changed = true;
            break;
        }
    }
    if (!backend_ids_changed) {
        for (int i = 0; i < sched->graph.n_leafs; i++) {
            if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] &&
                sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) {
                backend_ids_changed = true;
                break;
            }
        }
    }

    // allocate graph
    if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
        // the re-allocation may cause the split inputs to be moved to a different address
        ggml_backend_sched_synchronize(sched);
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
#endif
        ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids);
        if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
            GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__);
            return false;
        }
    }

    return true;
}
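
// execute the splits in order: copy the inputs of each split to its backend, run the split
// (optionally node-by-node through the eval callback), and record an event so that the next
// run can wait before overwriting the input copies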
static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
    struct ggml_backend_sched_split * splits = sched->splits;

    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &splits[i];
        int split_backend_id = split->backend_id;
        ggml_backend_t split_backend = sched->backends[split_backend_id];

        // copy the input tensors to the split backend
        for (int j = 0; j < split->n_inputs; j++) {
            ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[j]);
            struct ggml_tensor * input = split->inputs[j];
            struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);

            if (input->flags & GGML_TENSOR_FLAG_INPUT) {
                // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                    ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
                } else {
                    ggml_backend_synchronize(split_backend);
                }
                ggml_backend_tensor_copy(input, input_cpy);
            } else {
                // wait for the split backend to finish using the input before overwriting it
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                    ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
                } else {
                    ggml_backend_synchronize(split_backend);
                }
                // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
                // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
                if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
                    ggml_backend_synchronize(input_backend);
                    if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                        ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
                    } else {
                        ggml_backend_synchronize(split_backend);
                    }
                    ggml_backend_tensor_copy(input, input_cpy);
                }
            }
        }

        if (!sched->callback_eval) {
            enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph);
            if (ec != GGML_STATUS_SUCCESS) {
                return ec;
            }
        } else {
            // similar to ggml_backend_compare_graph_backend
            for (int j0 = 0; j0 < split->graph.n_nodes; j0++) {
                struct ggml_tensor * t = split->graph.nodes[j0];

                // check if the user needs data from this node
                bool need = sched->callback_eval(t, true, sched->callback_eval_user_data);

                int j1 = j0;

                // determine the range [j0, j1] of nodes that can be computed together
                while (!need && j1 < split->graph.n_nodes - 1) {
                    t = split->graph.nodes[++j1];
                    need = sched->callback_eval(t, true, sched->callback_eval_user_data);
                }

                struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);

                enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv);
                if (ec != GGML_STATUS_SUCCESS) {
                    return ec;
                }

                // TODO: pass backend to the callback, then the user can decide if they want to synchronize
                ggml_backend_synchronize(split_backend);

                if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
                    break;
                }

                j0 = j1;
            }
        }

        // record the event of this copy
        if (split->n_inputs > 0) {
            if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend);
            }
        }
    }

    sched->cur_copy = (sched->cur_copy + 1) % sched->n_copies;

    return GGML_STATUS_SUCCESS;
}
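
// create a new scheduler for the given backends
// - the last backend in the list must be backed by a CPU device
// - bufts may be NULL, in which case the default buffer type of each backend is used
// - graph_size is used to size the hash set and the per-node bookkeeping arrays
// - parallel enables multiple graph copies (GGML_SCHED_MAX_COPIES) and per-backend events,
//   which allows the input copies and the computation of consecutive runs to overlap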
ggml_backend_sched_t ggml_backend_sched_new(
        ggml_backend_t * backends,
        ggml_backend_buffer_type_t * bufts,
        int n_backends,
        size_t graph_size,
        bool parallel) {
    GGML_ASSERT(n_backends > 0);
    GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
    GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);

    struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched));

    const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG");
    sched->debug = GGML_SCHED_DEBUG ? atoi(GGML_SCHED_DEBUG) : 0;
    sched->n_backends = n_backends;
    sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1;

    // initialize hash table
    // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead)
    sched->hash_set = ggml_hash_set_new(graph_size);
    sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
    sched->hv_tensor_copies = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));

    const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph
    const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2;
    sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
    sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
    sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
    sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));

    sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false);
    sched->context_buffer = (char *) malloc(sched->context_buffer_size);

    const int initial_splits_capacity = 16;
    sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0]));
    sched->splits_capacity = initial_splits_capacity;

    for (int b = 0; b < n_backends; b++) {
        sched->backends[b] = backends[b];
        sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]);
        GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b]));

        if (sched->n_copies > 1) {
            for (int c = 0; c < sched->n_copies; c++) {
                sched->events[b][c] = ggml_backend_event_new(backends[b]->device);
            }
        }
    }

    sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);

    ggml_backend_sched_reset(sched);

    return sched;
}
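
// Example (illustrative sketch, not part of this file): a typical way an application might set
// up and use the scheduler. The backend handles and the graph-building helper are hypothetical
// placeholders created elsewhere.
//
//   ggml_backend_t backends[2] = { backend_gpu, backend_cpu }; // the CPU backend must be last
//   ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 2, GGML_DEFAULT_GRAPH_SIZE, false);
//
//   struct ggml_cgraph * graph = build_graph(ctx);             // hypothetical helper
//   ggml_backend_sched_reserve(sched, graph);                  // optional: reserve with a worst-case graph
//   ggml_backend_sched_graph_compute(sched, graph);
//
//   ggml_backend_sched_reset(sched);                           // before building/computing a different graph
//   ggml_backend_sched_free(sched);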

void ggml_backend_sched_free(ggml_backend_sched_t sched) {
    if (sched == NULL) {
        return;
    }
    for (int b = 0; b < sched->n_backends; b++) {
        for (int c = 0; c < sched->n_copies; c++) {
            ggml_backend_event_free(sched->events[b][c]);
        }
    }
    ggml_gallocr_free(sched->galloc);
    ggml_free(sched->ctx);
    ggml_hash_set_free(&sched->hash_set);
    free(sched->splits);
    free(sched->hv_tensor_backend_ids);
    free(sched->hv_tensor_copies);
    free(sched->node_backend_ids);
    free(sched->leaf_backend_ids);
    free(sched->prev_node_backend_ids);
    free(sched->prev_leaf_backend_ids);
    free(sched->context_buffer);
    free(sched->graph.nodes);
    free(sched->graph.leafs);
    free(sched);
}

void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
    // reset state for the next run
    if (!sched->is_reset) {
        ggml_hash_set_reset(&sched->hash_set);
        memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
        memset(sched->hv_tensor_copies, 0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));
        sched->is_reset = true;
    }
    sched->is_alloc = false;
}

bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
    GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);

    ggml_backend_sched_split_graph(sched, measure_graph);

    ggml_backend_sched_synchronize(sched);

    if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) {
        return false;
    }

    ggml_backend_sched_reset(sched);

    return true;
}

bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs);

    ggml_backend_sched_split_graph(sched, graph);

    if (!ggml_backend_sched_alloc_splits(sched)) {
        return false;
    }

    sched->is_alloc = true;

    return true;
}

enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph);
    ggml_backend_sched_synchronize(sched);
    return err;
}

enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    if (!sched->is_reset && !sched->is_alloc) {
        ggml_backend_sched_reset(sched);
    }

    if (!sched->is_alloc) {
        if (!ggml_backend_sched_alloc_graph(sched, graph)) {
            return GGML_STATUS_ALLOC_FAILED;
        }
    }

    return ggml_backend_sched_compute_splits(sched);
}

void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) {
    for (int i = 0; i < sched->n_backends; i++) {
        ggml_backend_synchronize(sched->backends[i]);
    }
}

void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
    sched->callback_eval = callback;
    sched->callback_eval_user_data = user_data;
}
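
// Example (illustrative sketch): an eval callback that asks to observe nodes whose name starts
// with "probe". The callback name and the matching logic are hypothetical; the callback is
// called first with ask == true to decide whether the node's data is needed, and again with
// ask == false once the data is available. Returning false in the second call skips the
// remaining nodes of the current split.
//
//   static bool my_eval_cb(struct ggml_tensor * t, bool ask, void * user_data) {
//       if (ask) {
//           return strncmp(t->name, "probe", 5) == 0;
//       }
//       fprintf(stderr, "computed %s\n", t->name);
//       return true;
//   }
//
//   ggml_backend_sched_set_eval_callback(sched, my_eval_cb, NULL);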

int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
    return sched->n_splits;
}

int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) {
    return sched->n_copies;
}

int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) {
    return sched->n_backends;
}

ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) {
    GGML_ASSERT(i >= 0 && i < sched->n_backends);
    return sched->backends[i];
}

size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) {
    int backend_index = ggml_backend_sched_backend_id(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);

    return ggml_gallocr_get_buffer_size(sched->galloc, backend_index);
}

void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
    int backend_index = ggml_backend_sched_backend_id(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
    tensor_backend_id(node) = backend_index;
    SET_CAUSE(node, "usr");
    sched->is_reset = false;
}

ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
    int backend_index = tensor_backend_id(node);
    if (backend_index == -1) {
        return NULL;
    }
    return sched->backends[backend_index];
}

// utils

enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor) {
    GGML_ASSERT(tensor->buffer == NULL);
    GGML_ASSERT(tensor->view_src != NULL);
    GGML_ASSERT(tensor->view_src->buffer != NULL);
    GGML_ASSERT(tensor->view_src->data != NULL);

    tensor->buffer = tensor->view_src->buffer;
    tensor->data = (char *)tensor->view_src->data + tensor->view_offs;
    return ggml_backend_buffer_init_tensor(tensor->buffer, tensor);
}

enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) {
    GGML_ASSERT(tensor->buffer == NULL);
    GGML_ASSERT(tensor->data == NULL);
    GGML_ASSERT(tensor->view_src == NULL);
    GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer));
    GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <=
                (char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer));

    tensor->buffer = buffer;
    tensor->data = addr;
    return ggml_backend_buffer_init_tensor(buffer, tensor);
}
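
// Example (illustrative sketch): placing a tensor manually at the start of a pre-allocated
// buffer. Error handling is omitted; the tensor is assumed to have been created in a no_alloc
// context so it has no data yet, and the buffer size is a hypothetical value.
//
//   ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), 1024*1024);
//   void * base = ggml_backend_buffer_get_base(buf);
//   enum ggml_status status = ggml_backend_tensor_alloc(buf, tensor, base);
//   GGML_ASSERT(status == GGML_STATUS_SUCCESS);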

static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies,
    struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) {

    GGML_ASSERT(src != NULL);
    GGML_ASSERT(src->data && "graph must be allocated");

    size_t id = ggml_hash_insert(&hash_set, src);
    if (id == GGML_HASHSET_ALREADY_EXISTS) {
        return node_copies[ggml_hash_find(&hash_set, src)];
    }

    struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src);
    if (src->view_src != NULL) {
        dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
        dst->view_offs = src->view_offs;
    }
    dst->op = src->op;
    memcpy(dst->op_params, src->op_params, sizeof(dst->op_params));
    ggml_set_name(dst, src->name);

    // copy src
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        struct ggml_tensor * s = src->src[i];
        if (s == NULL) {
            continue;
        }
        dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
    }

    node_copies[id] = dst;
    return dst;
}

static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) {
    size_t id = ggml_hash_find(hash_set, src);
    if (node_init[id]) {
        return;
    }
    node_init[id] = true;

    struct ggml_tensor * dst = node_copies[id];
    if (dst->view_src != NULL) {
        graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src);
        enum ggml_status status = ggml_backend_view_init(dst);
        GGML_ASSERT(status == GGML_STATUS_SUCCESS);
    } else {
        ggml_backend_tensor_copy(src, dst);
    }

    // init src
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        struct ggml_tensor * s = src->src[i];
        if (s == NULL) {
            continue;
        }
        graph_copy_init_tensor(hash_set, node_copies, node_init, s);
    }
}

struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) {
    struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size);
    struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT
    bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0]));

    struct ggml_init_params params = {
        /* .mem_size   = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true
    };

    struct ggml_context * ctx_allocated = ggml_init(params);
    struct ggml_context * ctx_unallocated = ggml_init(params);

    if (ctx_allocated == NULL || ctx_unallocated == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__);
        ggml_hash_set_free(&hash_set);
        free(node_copies);
        free(node_init);
        ggml_free(ctx_allocated);
        ggml_free(ctx_unallocated);
        return {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
    }

    // dup nodes
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
    }

    // allocate nodes
    ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
    if (buffer == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__);
        ggml_hash_set_free(&hash_set);
        free(node_copies);
        free(node_init);
        ggml_free(ctx_allocated);
        ggml_free(ctx_unallocated);
        return {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
    }

    //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);

    // copy data and init views
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        graph_copy_init_tensor(&hash_set, node_copies, node_init, node);
    }

    // build graph copy
    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false);
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)];
        graph_copy->nodes[i] = node_copy;
    }
    graph_copy->n_nodes = graph->n_nodes;

    ggml_hash_set_free(&hash_set);
    free(node_copies);
    free(node_init);

    return {
        /* .buffer          = */ buffer,
        /* .ctx_allocated   = */ ctx_allocated,
        /* .ctx_unallocated = */ ctx_unallocated,
        /* .graph           = */ graph_copy,
    };
}

void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) {
    ggml_backend_buffer_free(copy.buffer);
    ggml_free(copy.ctx_allocated);
    ggml_free(copy.ctx_unallocated);
}

bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
    struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
    if (copy.buffer == NULL) {
        return false;
    }

    struct ggml_cgraph * g1 = graph;
    struct ggml_cgraph * g2 = copy.graph;

    assert(g1->n_nodes == g2->n_nodes);

    for (int i = 0; i < g1->n_nodes; i++) {
        struct ggml_tensor * t1 = g1->nodes[i];
        struct ggml_tensor * t2 = g2->nodes[i];

        assert(t1->op == t2->op && ggml_are_same_layout(t1, t2));

        struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1);
        struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1);

        ggml_backend_graph_compute(backend1, &g1v);
        ggml_backend_graph_compute(backend2, &g2v);

        if (ggml_is_view_op(t1->op)) {
            continue;
        }

        // compare results, calculate rms etc
        if (!callback(i, t1, t2, user_data)) {
            break;
        }
    }

    ggml_backend_graph_copy_free(copy);

    return true;
}
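
// Example (illustrative sketch): a callback for ggml_backend_compare_graph_backend that stops
// at the first node whose outputs differ. The comparison helper compare_tensor_data is
// hypothetical; it could, for example, read both tensors back with ggml_backend_tensor_get and
// compute an RMS error.
//
//   static bool my_cmp_cb(int index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
//       // t1 and t2 hold the result of the same node computed on backend1 and backend2
//       bool ok = compare_tensor_data(t1, t2); // hypothetical helper
//       if (!ok) {
//           fprintf(stderr, "mismatch at node %d (%s)\n", index, t1->name);
//       }
//       return ok; // returning false stops the comparison
//   }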

// CPU backend - buffer

static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
    uintptr_t data = (uintptr_t)buffer->context;

    // align the buffer
    if (data % TENSOR_ALIGNMENT != 0) {
        data = GGML_PAD(data, TENSOR_ALIGNMENT);
    }

    return (void *)data;
}

static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_aligned_free(buffer->context, buffer->size);
}

static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    memset((char *)tensor->data + offset, value, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    GGML_UNUSED(buffer);
}

static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
    if (ggml_backend_buffer_is_host(src->buffer)) {
        memcpy(dst->data, src->data, ggml_nbytes(src));
        return true;
    }
    return false;

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    memset(buffer->context, value, buffer->size);
}

static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = {
    /* .free_buffer   = */ ggml_backend_cpu_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};

static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = {
    /* .free_buffer   = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};

// CPU backend buffer type
// this buffer type is defined here to make it available to all backends

static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU";

    GGML_UNUSED(buft);
}

static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    void * data = ggml_aligned_malloc(size);

    if (data == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size);
        return NULL;
    }

    return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size);
}

static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return TENSOR_ALIGNMENT;

    GGML_UNUSED(buft);
}

static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    GGML_UNUSED(buft);
}

ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type;
}

static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU_Mapped";

    GGML_UNUSED(buft);
}

static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_buffer_from_ptr_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type;
}

ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
    GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned");
    return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size);
}
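
// Example (illustrative sketch): wrapping memory that the application already owns (for example
// an mmap-ed model file) so that ggml tensors can be placed in it without an extra copy. The
// pointer and size are hypothetical; the pointer must be aligned to TENSOR_ALIGNMENT and remains
// owned by the caller.
//
//   ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(mapped_data, mapped_size);
//   // the buffer does not free the pointer when it is destroyed (free_buffer is NULL)
//   ggml_backend_buffer_free(buf);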