// ggml-backend.c

#include "ggml-backend-impl.h"
#include "ggml-alloc.h"
#include "ggml-impl.h"

#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

// backend buffer type

const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name(buft);
}

GGML_CALL ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    return buft->iface.alloc_buffer(buft, size);
}

size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_alignment(buft);
}

size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
    // get_max_size is optional, defaults to SIZE_MAX
    if (buft->iface.get_max_size) {
        return buft->iface.get_max_size(buft);
    }

    return SIZE_MAX;
}

GGML_CALL size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
    // get_alloc_size is optional, defaults to ggml_nbytes
    if (buft->iface.get_alloc_size) {
        size_t size = buft->iface.get_alloc_size(buft, tensor);
        assert(size >= ggml_nbytes(tensor));
        return size;
    }

    return ggml_nbytes(tensor);
}

bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
    return buft->iface.supports_backend(buft, backend);
}

bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
    if (buft->iface.is_host) {
        return buft->iface.is_host(buft);
    }
    return false;
}
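// Illustrative usage (not part of the original file): because get_max_size and
// get_alloc_size fall back to SIZE_MAX and ggml_nbytes() when the backend leaves
// them NULL, any buffer type can be queried uniformly. A minimal sketch:
#if 0
static void example_print_buft_info(ggml_backend_buffer_type_t buft, struct ggml_tensor * t) {
    size_t alignment  = ggml_backend_buft_get_alignment(buft);      // required callback
    size_t max_size   = ggml_backend_buft_get_max_size(buft);       // SIZE_MAX if unset
    size_t alloc_size = ggml_backend_buft_get_alloc_size(buft, t);  // >= ggml_nbytes(t)
    printf("%s: alignment=%zu max=%zu alloc=%zu\n",
           ggml_backend_buft_name(buft), alignment, max_size, alloc_size);
}
#endif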
// backend buffer

GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init(
               ggml_backend_buffer_type_t      buft,
        struct ggml_backend_buffer_i           iface,
               ggml_backend_buffer_context_t   context,
               size_t                          size) {
    ggml_backend_buffer_t buffer = malloc(sizeof(struct ggml_backend_buffer));

    (*buffer) = (struct ggml_backend_buffer) {
        /* .interface = */ iface,
        /* .buft      = */ buft,
        /* .context   = */ context,
        /* .size      = */ size,
        /* .usage     = */ GGML_BACKEND_BUFFER_USAGE_ANY
    };

    return buffer;
}

const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name(buffer);
}

void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
    if (buffer == NULL) {
        return;
    }

    if (buffer->iface.free_buffer != NULL) {
        buffer->iface.free_buffer(buffer);
    }
    free(buffer);
}

size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
    return buffer->size;
}

void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
    void * base = buffer->iface.get_base(buffer);

    GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");

    return base;
}

GGML_CALL void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    // init_tensor is optional
    if (buffer->iface.init_tensor) {
        buffer->iface.init_tensor(buffer, tensor);
    }
}

size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
}

void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    buffer->iface.clear(buffer, value);
}

bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
}

void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    buffer->usage = usage;

    // FIXME: add a generic callback to the buffer interface
    if (ggml_backend_buffer_is_multi_buffer(buffer)) {
        ggml_backend_multi_buffer_set_usage(buffer, usage);
    }
}

ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
    return buffer->buft;
}

void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
    if (buffer->iface.reset) {
        buffer->iface.reset(buffer);
    }
}
bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) {
    ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
    if (dst_buf->iface.cpy_tensor) {
        // call the callback that was checked above (the destination buffer's)
        return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
    }
    return false;
}
// backend

const char * ggml_backend_name(ggml_backend_t backend) {
    if (backend == NULL) {
        return "NULL";
    }
    return backend->iface.get_name(backend);
}

void ggml_backend_free(ggml_backend_t backend) {
    if (backend == NULL) {
        return;
    }

    backend->iface.free(backend);
}

ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) {
    return backend->iface.get_default_buffer_type(backend);
}

ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
    return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size);
}

size_t ggml_backend_get_alignment(ggml_backend_t backend) {
    return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend));
}

size_t ggml_backend_get_max_size(ggml_backend_t backend) {
    return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend));
}

void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    if (backend->iface.set_tensor_async == NULL) {
        ggml_backend_tensor_set(tensor, data, offset, size);
    } else {
        backend->iface.set_tensor_async(backend, tensor, data, offset, size);
    }
}

void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    if (backend->iface.get_tensor_async == NULL) {
        ggml_backend_tensor_get(tensor, data, offset, size);
    } else {
        backend->iface.get_tensor_async(backend, tensor, data, offset, size);
    }
}
GGML_CALL void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    // write through the buffer that actually owns the data (the view source's buffer for views)
    buf->iface.set_tensor(buf, tensor, data, offset, size);
}

GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    buf->iface.get_tensor(buf, tensor, data, offset, size);
}
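// Illustrative round trip (not part of the original file): tensor data always
// moves through the owning buffer's interface, so the same calls work for host
// and device buffers alike. Hedged sketch, assuming an already-allocated F32
// tensor `t` with at least 16 elements:
#if 0
float src[16] = {0};
float dst[16] = {0};
ggml_backend_tensor_set(t, src, 0, sizeof(src));   // host memory -> backend buffer
ggml_backend_tensor_get(t, dst, 0, sizeof(dst));   // backend buffer -> host memory
#endif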
void ggml_backend_synchronize(ggml_backend_t backend) {
    if (backend->iface.synchronize == NULL) {
        return;
    }

    backend->iface.synchronize(backend);
}

ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    return backend->iface.graph_plan_create(backend, cgraph);
}

void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    backend->iface.graph_plan_free(backend, plan);
}

void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    backend->iface.graph_plan_compute(backend, plan);
}

bool ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    return backend->iface.graph_compute(backend, cgraph);
}

bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    return backend->iface.supports_op(backend, op);
}

// backend copy

static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    if (a->type != b->type) {
        return false;
    }
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        if (a->ne[i] != b->ne[i]) {
            return false;
        }
        if (a->nb[i] != b->nb[i]) {
            return false;
        }
    }
    return true;
}

void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    if (ggml_backend_buffer_is_host(src->buffer)) {
        ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
    } else if (ggml_backend_buffer_is_host(dst->buffer)) {
        ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
    } else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
#ifndef NDEBUG
        fprintf(stderr, "%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
#endif
        size_t nbytes = ggml_nbytes(src);
        void * data = malloc(nbytes);
        ggml_backend_tensor_get(src, data, 0, nbytes);
        ggml_backend_tensor_set(dst, data, 0, nbytes);
        free(data);
    }
}

void ggml_backend_tensor_copy_async(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    if (ggml_backend_buft_supports_backend(src->buffer->buft, backend) && ggml_backend_buft_supports_backend(dst->buffer->buft, backend)) {
        if (backend->iface.cpy_tensor_async != NULL) {
            if (backend->iface.cpy_tensor_async(backend, src, dst)) {
                return;
            }
        }
    }

    size_t nbytes = ggml_nbytes(src);
    if (ggml_backend_buffer_is_host(src->buffer)) {
        ggml_backend_tensor_set_async(backend, dst, src->data, 0, nbytes);
    } else {
        ggml_backend_tensor_copy(src, dst);
    }
}
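// Copy-path summary (illustrative note, not part of the original file):
// ggml_backend_tensor_copy() tries, in order, a direct host-side path, the
// destination buffer's cpy_tensor callback, and finally a staged copy through
// host memory. A hedged sketch of moving data between two same-layout tensors:
#if 0
struct ggml_tensor * t_host;   // hypothetical tensor allocated in a host buffer
struct ggml_tensor * t_dev;    // hypothetical tensor with the same layout in a device buffer
ggml_backend_tensor_copy(t_host, t_dev);   // picks the fastest available path
#endif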
// backend registry

#define GGML_MAX_BACKENDS_REG 16

struct ggml_backend_reg {
    char name[128];
    ggml_backend_init_fn init_fn;
    ggml_backend_buffer_type_t default_buffer_type;
    void * user_data;
};

static struct ggml_backend_reg ggml_backend_registry[GGML_MAX_BACKENDS_REG];
static size_t ggml_backend_registry_count = 0;

GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user_data);

GGML_CALL static void ggml_backend_registry_init(void) {
    static bool initialized = false;

    if (initialized) {
        return;
    }

    initialized = true;

    ggml_backend_register("CPU", ggml_backend_reg_cpu_init, ggml_backend_cpu_buffer_type(), NULL);

    // add forward decls here to avoid including the backend headers
#ifdef GGML_USE_CUBLAS
    extern GGML_CALL void ggml_backend_cuda_reg_devices(void);
    ggml_backend_cuda_reg_devices();
#endif

#ifdef GGML_USE_SYCL
    extern void ggml_backend_sycl_reg_devices(void);
    ggml_backend_sycl_reg_devices();
#endif

#ifdef GGML_USE_METAL
    extern GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data);
    extern GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);
    ggml_backend_register("Metal", ggml_backend_reg_metal_init, ggml_backend_metal_buffer_type(), NULL);
#endif

#ifdef GGML_USE_VULKAN
    extern GGML_CALL int ggml_backend_vk_reg_devices(void);
    ggml_backend_vk_reg_devices();
#endif

#ifdef GGML_USE_KOMPUTE
    extern GGML_CALL void ggml_backend_kompute_reg_devices(void);
    ggml_backend_kompute_reg_devices();
#endif
}

GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data) {
    GGML_ASSERT(ggml_backend_registry_count < GGML_MAX_BACKENDS_REG);

    size_t id = ggml_backend_registry_count;

    ggml_backend_registry[id] = (struct ggml_backend_reg) {
        /* .name                = */ {0},
        /* .fn                  = */ init_fn,
        /* .default_buffer_type = */ default_buffer_type,
        /* .user_data           = */ user_data,
    };

    snprintf(ggml_backend_registry[id].name, sizeof(ggml_backend_registry[id].name), "%s", name);

#ifndef NDEBUG
    fprintf(stderr, "%s: registered backend %s\n", __func__, name);
#endif

    ggml_backend_registry_count++;
}

size_t ggml_backend_reg_get_count(void) {
    ggml_backend_registry_init();

    return ggml_backend_registry_count;
}

size_t ggml_backend_reg_find_by_name(const char * name) {
    ggml_backend_registry_init();

    for (size_t i = 0; i < ggml_backend_registry_count; i++) {
        // TODO: case insensitive in a portable way
        if (strcmp(ggml_backend_registry[i].name, name) == 0) {
            return i;
        }
    }

    // not found
    return SIZE_MAX;
}

// init from backend:params string
ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str) {
    ggml_backend_registry_init();

    const char * params = strchr(backend_str, ':');
    char backend_name[128];
    if (params == NULL) {
        snprintf(backend_name, sizeof(backend_name), "%s", backend_str);
        params = "";
    } else {
        snprintf(backend_name, sizeof(backend_name), "%.*s", (int)(params - backend_str), backend_str);
        params++;
    }

    size_t backend_i = ggml_backend_reg_find_by_name(backend_name);

    if (backend_i == SIZE_MAX) {
        fprintf(stderr, "%s: backend %s not found\n", __func__, backend_name);
        return NULL;
    }

    return ggml_backend_reg_init_backend(backend_i, params);
}
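// Illustrative usage (not part of the original file): the "backend:params"
// string form lets callers pick a registered backend by name at run time.
// A minimal hedged sketch:
#if 0
ggml_backend_t backend = ggml_backend_reg_init_backend_from_str("CPU");   // no ':' -> empty params
if (backend == NULL) {
    backend = ggml_backend_reg_init_backend(0, "");                        // fall back to the first registered backend
}
#endif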
const char * ggml_backend_reg_get_name(size_t i) {
    ggml_backend_registry_init();

    GGML_ASSERT(i < ggml_backend_registry_count);
    return ggml_backend_registry[i].name;
}

ggml_backend_t ggml_backend_reg_init_backend(size_t i, const char * params) {
    ggml_backend_registry_init();

    GGML_ASSERT(i < ggml_backend_registry_count);
    return ggml_backend_registry[i].init_fn(params, ggml_backend_registry[i].user_data);
}

ggml_backend_buffer_type_t ggml_backend_reg_get_default_buffer_type(size_t i) {
    ggml_backend_registry_init();

    GGML_ASSERT(i < ggml_backend_registry_count);
    return ggml_backend_registry[i].default_buffer_type;
}

ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size) {
    ggml_backend_registry_init();

    GGML_ASSERT(i < ggml_backend_registry_count);
    return ggml_backend_buft_alloc_buffer(ggml_backend_registry[i].default_buffer_type, size);
}

// backend CPU

GGML_CALL static const char * ggml_backend_cpu_buffer_name(ggml_backend_buffer_t buffer) {
    return "CPU";

    GGML_UNUSED(buffer);
}

GGML_CALL static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
    return (void *)buffer->context;
}

GGML_CALL static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    free(buffer->context);
}

GGML_CALL static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    GGML_UNUSED(buffer);
}

GGML_CALL static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    GGML_UNUSED(buffer);
}

GGML_CALL static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
    if (ggml_backend_buffer_is_host(src->buffer)) {
        memcpy(dst->data, src->data, ggml_nbytes(src));
        return true;
    }
    return false;

    GGML_UNUSED(buffer);
}

GGML_CALL static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    memset(buffer->context, value, buffer->size);
}

static struct ggml_backend_buffer_i cpu_backend_buffer_i = {
    /* .get_name        = */ ggml_backend_cpu_buffer_name,
    /* .free_buffer     = */ ggml_backend_cpu_buffer_free_buffer,
    /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor     = */ NULL, // no initialization required
    /* .set_tensor      = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor      = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor      = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear           = */ ggml_backend_cpu_buffer_clear,
    /* .reset           = */ NULL,
};

// for buffers from ptr, free is not called
static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
    /* .get_name        = */ ggml_backend_cpu_buffer_name,
    /* .free_buffer     = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
    /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor     = */ NULL, // no initialization required
    /* .set_tensor      = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor      = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor      = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear           = */ ggml_backend_cpu_buffer_clear,
    /* .reset           = */ NULL,
};

static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512

GGML_CALL static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU";

    GGML_UNUSED(buft);
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    size += TENSOR_ALIGNMENT;   // malloc may return an address that is not aligned
    void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?

    GGML_ASSERT(data != NULL && "failed to allocate buffer");

    return ggml_backend_buffer_init(buft, cpu_backend_buffer_i, data, size);
}

GGML_CALL static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return TENSOR_ALIGNMENT;

    GGML_UNUSED(buft);
}

GGML_CALL static bool ggml_backend_cpu_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
    return ggml_backend_is_cpu(backend);

    GGML_UNUSED(buft);
}

GGML_CALL static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    GGML_UNUSED(buft);
}

GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface = */ {
            /* .get_name         = */ ggml_backend_cpu_buffer_type_get_name,
            /* .alloc_buffer     = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
            /* .supports_backend = */ ggml_backend_cpu_buffer_type_supports_backend,
            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type;
}
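// Illustrative usage (not part of the original file): allocating a raw buffer
// from the CPU buffer type and releasing it again. A minimal sketch:
#if 0
ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 1024*1024);   // 1 MiB
ggml_backend_buffer_clear(buf, 0);                                             // zero the whole buffer
ggml_backend_buffer_free(buf);
#endif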
#ifdef GGML_USE_CPU_HBM

// buffer type HBM

#include <hbwmalloc.h>

GGML_CALL static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU_HBM";

    GGML_UNUSED(buft);
}

GGML_CALL static const char * ggml_backend_cpu_hbm_buffer_get_name(ggml_backend_buffer_t buf) {
    return "CPU_HBM";

    GGML_UNUSED(buf);
}

GGML_CALL static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    hbw_free(buffer->context);
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    //void * ptr = hbw_malloc(size);
    void * ptr;
    int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
    if (result != 0) {
        fprintf(stderr, "failed to allocate HBM buffer of size %zu\n", size);
        return NULL;
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.get_name = ggml_backend_cpu_hbm_buffer_get_name;
    buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer;

    return buffer;
}

ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
        /* .iface = */ {
            /* .get_name         = */ ggml_backend_cpu_hbm_buffer_type_get_name,
            /* .alloc_buffer     = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
            /* .supports_backend = */ ggml_backend_cpu_buffer_type_supports_backend,
            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type_hbm;
}
#endif

struct ggml_backend_cpu_context {
    int n_threads;
    void * work_data;
    size_t work_size;
};

GGML_CALL static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
    return "CPU";

    GGML_UNUSED(backend);
}

GGML_CALL static void ggml_backend_cpu_free(ggml_backend_t backend) {
    struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
    free(cpu_ctx->work_data);
    free(cpu_ctx);
    free(backend);
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_cpu_get_default_buffer_type(ggml_backend_t backend) {
    return ggml_backend_cpu_buffer_type();

    GGML_UNUSED(backend);
}

struct ggml_backend_plan_cpu {
    struct ggml_cplan cplan;
    struct ggml_cgraph cgraph;
};

GGML_CALL static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, const struct ggml_cgraph * cgraph) {
    struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;

    struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu));

    cpu_plan->cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
    cpu_plan->cgraph = *cgraph; // FIXME: deep copy

    if (cpu_plan->cplan.work_size > 0) {
        cpu_plan->cplan.work_data = malloc(cpu_plan->cplan.work_size);
    }

    return cpu_plan;
}

GGML_CALL static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;

    free(cpu_plan->cplan.work_data);
    free(cpu_plan);

    GGML_UNUSED(backend);
}

GGML_CALL static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;

    ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan);

    GGML_UNUSED(backend);
}

GGML_CALL static bool ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;

    struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);

    if (cpu_ctx->work_size < cplan.work_size) {
        // TODO: may be faster to free and use malloc to avoid the copy
        cpu_ctx->work_data = realloc(cpu_ctx->work_data, cplan.work_size);
        cpu_ctx->work_size = cplan.work_size;
    }

    cplan.work_data = cpu_ctx->work_data;

    ggml_graph_compute(cgraph, &cplan);
    return true;
}

GGML_CALL static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_CPY:
            return op->type != GGML_TYPE_IQ2_XXS && op->type != GGML_TYPE_IQ2_XS; // missing type_traits.from_float
        case GGML_OP_MUL_MAT:
            return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_internal_get_type_traits(op->src[0]->type).vec_dot_type;
        default:
            return true;
    }

    GGML_UNUSED(backend);
}

static struct ggml_backend_i cpu_backend_i = {
    /* .get_name                = */ ggml_backend_cpu_name,
    /* .free                    = */ ggml_backend_cpu_free,
    /* .get_default_buffer_type = */ ggml_backend_cpu_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
    /* .get_tensor_async        = */ NULL,
    /* .cpy_tensor_async        = */ NULL,
    /* .synchronize             = */ NULL,
    /* .graph_plan_create       = */ ggml_backend_cpu_graph_plan_create,
    /* .graph_plan_free         = */ ggml_backend_cpu_graph_plan_free,
    /* .graph_plan_compute      = */ ggml_backend_cpu_graph_plan_compute,
    /* .graph_compute           = */ ggml_backend_cpu_graph_compute,
    /* .supports_op             = */ ggml_backend_cpu_supports_op,
};

ggml_backend_t ggml_backend_cpu_init(void) {
    struct ggml_backend_cpu_context * ctx = malloc(sizeof(struct ggml_backend_cpu_context));

    ctx->n_threads = GGML_DEFAULT_N_THREADS;
    ctx->work_data = NULL;
    ctx->work_size = 0;

    ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));

    *cpu_backend = (struct ggml_backend) {
        /* .interface = */ cpu_backend_i,
        /* .context   = */ ctx
    };
    return cpu_backend;
}

GGML_CALL bool ggml_backend_is_cpu(ggml_backend_t backend) {
    return backend && backend->iface.get_name == ggml_backend_cpu_name;
}

void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
    GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));

    struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context;
    ctx->n_threads = n_threads;
}
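// Illustrative usage (not part of the original file): bringing up the CPU
// backend, configuring the thread count, and running a prebuilt graph.
// Hedged sketch; `graph` stands for a ggml_cgraph built and allocated elsewhere.
#if 0
ggml_backend_t cpu = ggml_backend_cpu_init();
ggml_backend_cpu_set_n_threads(cpu, 8);
ggml_backend_graph_compute(cpu, graph);   // synchronous on the CPU backend
ggml_backend_free(cpu);
#endif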
GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
    return ggml_backend_buffer_init(ggml_backend_cpu_buffer_type(), cpu_backend_buffer_i_from_ptr, ptr, size);
}

GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user_data) {
    return ggml_backend_cpu_init();

    GGML_UNUSED(params);
    GGML_UNUSED(user_data);
}

// multi-buffer buffer

struct ggml_backend_multi_buffer_context {
    ggml_backend_buffer_t * buffers;
    size_t n_buffers;
};

typedef struct ggml_backend_multi_buffer_context * ggml_backend_multi_buffer_context_t;

GGML_CALL static const char * ggml_backend_multi_buffer_get_name(ggml_backend_buffer_t buffer) {
    ggml_backend_multi_buffer_context_t ctx = (ggml_backend_multi_buffer_context_t) buffer->context;

    return ctx->buffers[0]->iface.get_name(ctx->buffers[0]);
}

GGML_CALL static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_multi_buffer_context_t ctx = (ggml_backend_multi_buffer_context_t) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_free(ctx->buffers[i]);
    }

    free(ctx->buffers);
    free(ctx);
}

GGML_CALL static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_multi_buffer_context_t ctx = (ggml_backend_multi_buffer_context_t) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_clear(ctx->buffers[i], value);
    }
}

static struct ggml_backend_buffer_i ggml_backend_multi_buffer_context_interface(void) {
    static struct ggml_backend_buffer_i multi_backend_buffer_i = {
        /* .get_name        = */ ggml_backend_multi_buffer_get_name,
        /* .free_buffer     = */ ggml_backend_multi_buffer_free_buffer,
        /* .get_base        = */ NULL,
        /* .init_tensor     = */ NULL,
        /* .set_tensor      = */ NULL,
        /* .get_tensor      = */ NULL,
        /* .cpy_tensor      = */ NULL,
        /* .clear           = */ ggml_backend_multi_buffer_clear,
        /* .reset           = */ NULL,
    };

    return multi_backend_buffer_i;
}

GGML_CALL ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) {
    ggml_backend_multi_buffer_context_t ctx = (ggml_backend_multi_buffer_context_t) malloc(sizeof(struct ggml_backend_multi_buffer_context));
    ctx->n_buffers = n_buffers;
    ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));

    size_t total_size = 0;
    for (size_t i = 0; i < n_buffers; i++) {
        ctx->buffers[i] = buffers[i];
        total_size += ggml_backend_buffer_get_size(buffers[i]);
    }

    return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_context_interface(), ctx, total_size);
}

GGML_CALL bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_multi_buffer_get_name;
}

GGML_CALL void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer));
    ggml_backend_multi_buffer_context_t ctx = (ggml_backend_multi_buffer_context_t) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
    }
}
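// Illustrative usage (not part of the original file): a multi-buffer wraps
// several buffers of the same type so they can be freed, cleared, or tagged
// as a single unit. Hedged sketch with hypothetical buffers buf_a and buf_b:
#if 0
ggml_backend_buffer_t parts[2] = { buf_a, buf_b };
ggml_backend_buffer_t combined = ggml_backend_multi_buffer_alloc_buffer(parts, 2);
ggml_backend_buffer_set_usage(combined, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);   // propagated to both parts
ggml_backend_buffer_free(combined);                                           // frees buf_a and buf_b too
#endif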
// scheduler

#define GGML_MAX_BACKENDS 16
#define GGML_MAX_SPLITS 256
#define GGML_MAX_SPLIT_INPUTS 16

struct ggml_backend_sched_split {
    ggml_tallocr_t tallocr;
    int i_start;
    int i_end;
    struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS];
    int n_inputs;
    // graph view of this split
    struct ggml_cgraph graph;
};

struct ggml_backend_sched {
    bool is_reset; // true if the scheduler has been reset since the last graph split

    int n_backends;
    ggml_backend_t backends[GGML_MAX_BACKENDS];
    ggml_backend_buffer_type_t bufts[GGML_MAX_BACKENDS];
    ggml_tallocr_t tallocs[GGML_MAX_BACKENDS];

    ggml_gallocr_t galloc;

    // hash keys of the nodes in the graph
    struct ggml_hash_set hash_set;
    // hash values (arrays of [hash_set.size])
    ggml_tallocr_t * node_talloc;                            // tallocr assigned to each node (indirectly this is the backend)
    struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // copies of each node for each destination backend

    // copy of the graph with modified inputs
    struct ggml_cgraph * graph;

    struct ggml_backend_sched_split splits[GGML_MAX_SPLITS];
    int n_splits;

    struct ggml_context * ctx;

    // align context_buffer to GGML_MEM_ALIGN
#ifdef _MSC_VER
    __declspec(align(GGML_MEM_ALIGN))
#else
    __attribute__((aligned(GGML_MEM_ALIGN)))
#endif
    char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + sizeof(struct ggml_cgraph)];

    ggml_backend_sched_eval_callback callback_eval;
    void * callback_eval_user_data;
};

#define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node)
#define node_allocr(node) sched->node_talloc[hash_id(node)]

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

// returns the priority of the backend, lower is better
static int sched_backend_prio(ggml_backend_sched_t sched, ggml_backend_t backend) {
    for (int i = 0; i < sched->n_backends; i++) {
        if (sched->backends[i] == backend) {
            return i;
        }
    }
    return INT_MAX;
}

static int sched_allocr_prio(ggml_backend_sched_t sched, ggml_tallocr_t allocr) {
    for (int i = 0; i < sched->n_backends; i++) {
        if (sched->tallocs[i] == allocr) {
            return i;
        }
    }
    return INT_MAX;
}
static ggml_tallocr_t sched_allocr_from_buffer(ggml_backend_sched_t sched, ggml_backend_buffer_t buffer) {
    if (buffer == NULL) {
        return NULL;
    }

    // check if the buffer is already allocated in an allocr buffer (from user manual allocations)
    for (int i = 0; i < sched->n_backends; i++) {
        if (ggml_tallocr_get_buffer(sched->tallocs[i]) == buffer) {
            return sched->tallocs[i];
        }
    }

    // find highest prio backend that supports the buffer type
    for (int i = 0; i < sched->n_backends; i++) {
        if (ggml_backend_buft_supports_backend(buffer->buft, sched->backends[i])) {
            return sched->tallocs[i];
        }
    }

    GGML_ASSERT(false && "tensor buffer type not supported by any backend");
}
static ggml_backend_t get_allocr_backend(ggml_backend_sched_t sched, ggml_tallocr_t allocr) {
    if (allocr == NULL) {
        return NULL;
    }
    for (int i = 0; i < sched->n_backends; i++) {
        if (sched->tallocs[i] == allocr) {
            return sched->backends[i];
        }
    }
    GGML_UNREACHABLE();
}

#if 0
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug only
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
#define GET_CAUSE(node) causes[hash_id(node)]
#else
#define SET_CAUSE(node, ...)
#define GET_CAUSE(node) ""
#endif

// returns the backend that should be used for the node based on the current locations
static ggml_tallocr_t sched_allocr_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) {
    // assign pre-allocated nodes to their backend
    // dst
    ggml_tallocr_t cur_allocr = sched_allocr_from_buffer(sched, node->buffer);
    if (cur_allocr != NULL) {
        SET_CAUSE(node, "1.dst");
        return cur_allocr;
    }
    // view_src
    if (node->view_src != NULL) {
        cur_allocr = sched_allocr_from_buffer(sched, node->view_src->buffer);
        if (cur_allocr != NULL) {
            SET_CAUSE(node, "1.vsrc");
            return cur_allocr;
        }
    }
    // assign nodes that use weights to the backend of the weights
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        const struct ggml_tensor * src = node->src[i];
        if (src == NULL) {
            break;
        }
        if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
            ggml_tallocr_t src_allocr = sched_allocr_from_buffer(sched, src->buffer);
            // operations with weights are always run on the same backend as the weights
            SET_CAUSE(node, "1.wgt%d", i);
            return src_allocr;
        }
    }

    return NULL;
}

static char * fmt_size(size_t size) {
    static char buffer[128];
    if (size >= 1024*1024) {
        sprintf(buffer, "%zuM", size/1024/1024);
    } else {
        sprintf(buffer, "%zuK", size/1024);
    }
    return buffer;
}

static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    int cur_split = 0;
    for (int i = 0; i < graph->n_nodes; i++) {
        if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
            ggml_backend_t split_backend = get_allocr_backend(sched, sched->splits[cur_split].tallocr);
            fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
                sched->splits[cur_split].n_inputs);
            for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
                fprintf(stderr, "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
                    fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
            }
            fprintf(stderr, "\n");
            cur_split++;
        }
        struct ggml_tensor * node = graph->nodes[i];
        if (ggml_is_view_op(node->op)) {
            continue;
        }
        ggml_tallocr_t node_allocr = node_allocr(node);
        ggml_backend_t node_backend = node_allocr ? get_allocr_backend(sched, node_allocr) : NULL; // FIXME:
        fprintf(stderr, "node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
            fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", GET_CAUSE(node));
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                break;
            }
            ggml_tallocr_t src_allocr = node_allocr(src);
            ggml_backend_t src_backend = src_allocr ? get_allocr_backend(sched, src_allocr) : NULL;
            fprintf(stderr, " %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
                fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
        }
        fprintf(stderr, "\n");
    }
}

// creates a copy of the tensor with the same memory layout
static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
    struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        dup->nb[i] = tensor->nb[i];
    }
    return dup;
}
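// Illustrative note (not part of the original file): unlike ggml_dup_tensor(),
// ggml_dup_tensor_layout() also copies the strides, so the duplicate of a
// non-contiguous view keeps the same memory layout. Hedged sketch, assuming a
// valid ggml_context `ctx`:
#if 0
struct ggml_tensor * a   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 8);
struct ggml_tensor * at  = ggml_transpose(ctx, a);            // non-contiguous view of a
struct ggml_tensor * cpy = ggml_dup_tensor_layout(ctx, at);   // same ne and same nb as at
#endif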
  854. //#define DEBUG_PASS1
  855. //#define DEBUG_PASS2
  856. //#define DEBUG_PASS3
  857. //#define DEBUG_PASS4
  858. // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
  859. static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
  860. // reset splits
  861. sched->n_splits = 0;
  862. sched->is_reset = false;
  863. struct ggml_init_params params = {
  864. /* .mem_size = */ sizeof(sched->context_buffer),
  865. /* .mem_buffer = */ sched->context_buffer,
  866. /* .no_alloc = */ true
  867. };
  868. ggml_free(sched->ctx);
  869. sched->ctx = ggml_init(params);
  870. if (sched->ctx == NULL) {
  871. fprintf(stderr, "%s: failed to initialize context\n", __func__);
  872. GGML_ASSERT(false);
  873. }
  874. // pass 1: assign backends to ops with pre-allocated inputs
  875. for (int i = 0; i < graph->n_leafs; i++) {
  876. struct ggml_tensor * leaf = graph->leafs[i];
  877. if (node_allocr(leaf) != NULL) {
  878. // do not overwrite user assignments
  879. continue;
  880. }
  881. node_allocr(leaf) = sched_allocr_from_cur(sched, leaf);
  882. }
  883. for (int i = 0; i < graph->n_nodes; i++) {
  884. struct ggml_tensor * node = graph->nodes[i];
  885. if (node_allocr(node) != NULL) {
  886. // do not overwrite user assignments
  887. continue;
  888. }
  889. node_allocr(node) = sched_allocr_from_cur(sched, node);
  890. // src
  891. for (int j = 0; j < GGML_MAX_SRC; j++) {
  892. struct ggml_tensor * src = node->src[j];
  893. if (src == NULL) {
  894. break;
  895. }
  896. if (node_allocr(src) == NULL) {
  897. node_allocr(src) = sched_allocr_from_cur(sched, src);
  898. }
  899. }
  900. }
  901. #ifdef DEBUG_PASS1
  902. fprintf(stderr, "PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
  903. #endif
  904. // pass 2: expand current backend assignments
  905. // assign the same backend to adjacent nodes
  906. // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend)
  907. // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
  908. // pass 2.1 expand gpu up
  909. {
  910. ggml_tallocr_t cur_allocr = NULL;
  911. for (int i = graph->n_nodes - 1; i >= 0; i--) {
  912. struct ggml_tensor * node = graph->nodes[i];
  913. if (ggml_is_view_op(node->op)) {
  914. continue;
  915. }
  916. ggml_tallocr_t node_allocr = node_allocr(node);
  917. if (node_allocr != NULL) {
  918. if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
  919. // skip cpu (lowest prio backend)
  920. cur_allocr = NULL;
  921. } else {
  922. cur_allocr = node_allocr;
  923. }
  924. } else {
  925. node_allocr(node) = cur_allocr;
  926. SET_CAUSE(node, "2.1");
  927. }
  928. }
  929. }
  930. // pass 2.2 expand gpu down
  931. {
  932. ggml_tallocr_t cur_allocr = NULL;
  933. for (int i = 0; i < graph->n_nodes; i++) {
  934. struct ggml_tensor * node = graph->nodes[i];
  935. if (ggml_is_view_op(node->op)) {
  936. continue;
  937. }
  938. ggml_tallocr_t node_allocr = node_allocr(node);
  939. if (node_allocr != NULL) {
  940. if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
  941. // skip cpu (lowest prio backend)
  942. cur_allocr = NULL;
  943. } else {
  944. cur_allocr = node_allocr;
  945. }
  946. } else {
  947. node_allocr(node) = cur_allocr;
  948. SET_CAUSE(node, "2.2");
  949. }
  950. }
  951. }
  952. // pass 2.3 expand rest up
  953. {
  954. ggml_tallocr_t cur_allocr = NULL;
  955. for (int i = graph->n_nodes - 1; i >= 0; i--) {
  956. struct ggml_tensor * node = graph->nodes[i];
  957. if (ggml_is_view_op(node->op)) {
  958. continue;
  959. }
  960. ggml_tallocr_t node_allocr = node_allocr(node);
  961. if (node_allocr != NULL) {
  962. cur_allocr = node_allocr;
  963. } else {
  964. node_allocr(node) = cur_allocr;
  965. SET_CAUSE(node, "2.3");
  966. }
  967. }
  968. }
  969. // pass 2.4 expand rest down
  970. {
  971. ggml_tallocr_t cur_allocr = NULL;
  972. for (int i = 0; i < graph->n_nodes; i++) {
  973. struct ggml_tensor * node = graph->nodes[i];
  974. if (ggml_is_view_op(node->op)) {
  975. continue;
  976. }
  977. ggml_tallocr_t node_allocr = node_allocr(node);
  978. if (node_allocr != NULL) {
  979. cur_allocr = node_allocr;
  980. } else {
  981. node_allocr(node) = cur_allocr;
  982. SET_CAUSE(node, "2.4");
  983. }
  984. }
  985. }
  986. #ifdef DEBUG_PASS2
  987. fprintf(stderr, "PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
  988. #endif
  989. // pass 3: assign backends to remaining src from dst and view_src
  990. for (int i = 0; i < graph->n_nodes; i++) {
  991. struct ggml_tensor * node = graph->nodes[i];
  992. ggml_tallocr_t cur_allocr = node_allocr(node);
  993. if (node->view_src != NULL && cur_allocr == NULL) {
  994. cur_allocr = node_allocr(node) = node_allocr(node->view_src);
  995. SET_CAUSE(node, "3.vsrc");
  996. }
  997. for (int j = 0; j < GGML_MAX_SRC; j++) {
  998. struct ggml_tensor * src = node->src[j];
  999. if (src == NULL) {
  1000. break;
  1001. }
  1002. ggml_tallocr_t src_allocr = node_allocr(src);
  1003. if (src_allocr == NULL) {
  1004. if (src->view_src != NULL) {
  1005. // views are always on the same backend as the source
  1006. node_allocr(src) = node_allocr(src->view_src);
  1007. SET_CAUSE(src, "3.vsrc");
  1008. } else {
  1009. node_allocr(src) = cur_allocr;
  1010. SET_CAUSE(src, "3.cur");
  1011. }
  1012. }
  1013. }
  1014. }
  1015. #ifdef DEBUG_PASS3
  1016. fprintf(stderr, "PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
  1017. #endif
  1018. // pass 4: split graph, find tensors that need to be copied
  1019. {
  1020. int cur_split = 0;
  1021. // find the backend of the first split, skipping view ops
  1022. for (int i = 0; i < graph->n_nodes; i++) {
  1023. struct ggml_tensor * node = graph->nodes[i];
  1024. if (!ggml_is_view_op(node->op)) {
  1025. sched->splits[0].tallocr = node_allocr(node);
  1026. break;
  1027. }
  1028. }
  1029. sched->splits[0].i_start = 0;
  1030. sched->splits[0].n_inputs = 0;
  1031. memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
  1032. ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
  1033. size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
  1034. for (int i = 0; i < graph->n_nodes; i++) {
  1035. struct ggml_tensor * node = graph->nodes[i];
  1036. if (ggml_is_view_op(node->op)) {
  1037. continue;
  1038. }
  1039. ggml_tallocr_t node_allocr = node_allocr(node);
  1040. GGML_ASSERT(node_allocr != NULL); // all nodes should be assigned by now
  1041. if (node_allocr != cur_allocr) {
  1042. sched->splits[cur_split].i_end = i;
  1043. cur_split++;
  1044. GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
  1045. sched->splits[cur_split].tallocr = node_allocr;
  1046. sched->splits[cur_split].i_start = i;
  1047. sched->splits[cur_split].n_inputs = 0;
  1048. cur_allocr = node_allocr;
  1049. cur_backend_id = sched_allocr_prio(sched, cur_allocr);
  1050. }
  1051. // find inputs that are not on the same backend
  1052. for (int j = 0; j < GGML_MAX_SRC; j++) {
  1053. struct ggml_tensor * src = node->src[j];
  1054. if (src == NULL) {
  1055. break;
  1056. }
  1057. ggml_tallocr_t src_allocr = node_allocr(src);
  1058. GGML_ASSERT(src_allocr != NULL); // all inputs should be assigned by now
  1059. if (src_allocr != node_allocr) {
  1060. // create a copy of the input in the split's backend
  1061. size_t id = hash_id(src);
  1062. if (sched->node_copies[id][cur_backend_id] == NULL) {
  1063. ggml_backend_t backend = get_allocr_backend(sched, cur_allocr);
  1064. struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
  1065. ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
  1066. sched->node_copies[id][cur_backend_id] = tensor_copy;
  1067. node_allocr(tensor_copy) = cur_allocr;
  1068. SET_CAUSE(tensor_copy, "4.cpy");
  1069. int n_inputs = sched->splits[cur_split].n_inputs++;
  1070. GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
  1071. sched->splits[cur_split].inputs[n_inputs] = src;
  1072. }
  1073. node->src[j] = sched->node_copies[id][cur_backend_id];
  1074. #if 0
  1075. // check if the input is already in the split
  1076. bool found = false;
  1077. for (int k = 0; k < sched->splits[cur_split].n_inputs; k++) {
  1078. if (sched->splits[cur_split].inputs[k] == src) {
  1079. found = true;
  1080. break;
  1081. }
  1082. }
  1083. if (!found) {
  1084. int n_inputs = sched->splits[cur_split].n_inputs++;
  1085. //printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr)));
  1086. GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
  1087. sched->splits[cur_split].inputs[n_inputs] = src;
  1088. }
  1089. #endif
  1090. }
  1091. }
  1092. }
  1093. sched->splits[cur_split].i_end = graph->n_nodes;
  1094. sched->n_splits = cur_split + 1;
  1095. }

#ifdef DEBUG_PASS4
    fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
#endif

#ifndef NDEBUG
    // sanity check: all sources should have the same backend as the node
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        ggml_tallocr_t node_allocr = node_allocr(node);
        if (node_allocr == NULL) {
            fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
        }
        if (node->view_src != NULL && node_allocr != node_allocr(node->view_src)) {
            fprintf(stderr, "!!!!!!! %s has backend %s, view_src %s has backend %s\n",
                node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
                node->view_src->name, node_allocr(node->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(node->view_src))) : "NULL");
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                break;
            }
            ggml_tallocr_t src_allocr = node_allocr(src);
            if (src_allocr != node_allocr /* && src_backend != NULL */) { // ignore nulls for now
                fprintf(stderr, "!!!! %s has backend %s, src %d (%s) has backend %s\n",
                    node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
                    j, src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL");
            }
            if (src->view_src != NULL && src_allocr != node_allocr(src->view_src)) {
                fprintf(stderr, "!!!!!!! [src] %s has backend %s, view_src %s has backend %s\n",
                    src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL",
                    src->view_src->name, node_allocr(src->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(src->view_src))) : "NULL");
            }
        }
    }
    fflush(stderr);
#endif

    // create copies of the graph for each split
    // FIXME: avoid this copy, pass split inputs to ggml_gallocr_alloc_graph_n in some other way
    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(sched->ctx, graph->n_nodes + sched->n_splits*GGML_MAX_SPLIT_INPUTS, false);
    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &sched->splits[i];
        split->graph = ggml_graph_view(graph, split->i_start, split->i_end);

        // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
        for (int j = 0; j < split->n_inputs; j++) {
            struct ggml_tensor * input = split->inputs[j];
            struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];

            // add a dependency to the input source so that it is not freed before the copy is done
            GGML_ASSERT(input_cpy->src[0] == NULL || input_cpy->src[0] == input);
            input_cpy->src[0] = input;
            graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
        }

        for (int j = split->i_start; j < split->i_end; j++) {
            graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
        }
    }
    sched->graph = graph_copy;
}
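
// allocate the tensors of the split graph with ggml-alloc, using the per-node
// allocator assignments recorded in the scheduler's hash table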
static void sched_alloc_splits(ggml_backend_sched_t sched) {
    ggml_gallocr_alloc_graph_n(
        sched->galloc,
        sched->graph,
        sched->hash_set,
        sched->node_talloc);
}
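
// execute the splits in order: copy the inputs of each split to its backend, then
// compute the split's graph on that backend, optionally stopping at nodes requested
// by the eval callback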
static void sched_compute_splits(ggml_backend_sched_t sched) {
    uint64_t copy_us[GGML_MAX_BACKENDS] = {0};
    uint64_t compute_us[GGML_MAX_BACKENDS] = {0};

    struct ggml_backend_sched_split * splits = sched->splits;

    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &splits[i];
        ggml_backend_t split_backend = get_allocr_backend(sched, split->tallocr);
        int split_backend_id = sched_backend_prio(sched, split_backend);

        // copy the input tensors to the split backend
        uint64_t copy_start_us = ggml_time_us();
        for (int j = 0; j < split->n_inputs; j++) {
            struct ggml_tensor * input = split->inputs[j];
            struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][split_backend_id];

            GGML_ASSERT(input->buffer != NULL);
            GGML_ASSERT(input_cpy->buffer != NULL);

            // TODO: avoid this copy if it was already copied in a previous split, and the input didn't change
            // this is important to avoid copying constants such as KQ_mask and inp_pos multiple times
            ggml_backend_tensor_copy_async(split_backend, input, input_cpy);
        }
        //ggml_backend_synchronize(split_backend); // necessary to measure copy time
        int64_t copy_end_us = ggml_time_us();
        copy_us[split_backend_id] += copy_end_us - copy_start_us;

#if 0
        char split_filename[GGML_MAX_NAME];
        snprintf(split_filename, GGML_MAX_NAME, "split_%i_%s.dot", i, ggml_backend_name(split_backend));
        ggml_graph_dump_dot(split->graph, NULL, split_filename);
#endif

        uint64_t compute_start_us = ggml_time_us();
        if (!sched->callback_eval) {
            ggml_backend_graph_compute(split_backend, &split->graph);
            //ggml_backend_synchronize(split_backend); // necessary to measure compute time
        } else {
            // similar to ggml_backend_compare_graph_backend
            for (int j0 = 0; j0 < split->graph.n_nodes; j0++) {
                struct ggml_tensor * t = split->graph.nodes[j0];

                // check if the user needs data from this node
                bool need = sched->callback_eval(t, true, sched->callback_eval_user_data);

                int j1 = j0;

                // determine the range [j0, j1] of nodes that can be computed together
                while (!need && j1 < split->graph.n_nodes - 1) {
                    t = split->graph.nodes[++j1];
                    need = sched->callback_eval(t, true, sched->callback_eval_user_data);
                }

                struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);

                ggml_backend_graph_compute(split_backend, &gv);

                if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
                    break;
                }

                j0 = j1;
            }
        }
        uint64_t compute_end_us = ggml_time_us();
        compute_us[split_backend_id] += compute_end_us - compute_start_us;
    }

#if 0
    // per-backend timings
    fprintf(stderr, "sched_compute_splits times (%d splits):\n", sched->n_splits);
    for (int i = 0; i < sched->n_backends; i++) {
        if (copy_us[i] > 0 || compute_us[i] > 0) {
            fprintf(stderr, "\t%5.5s: %lu us copy, %lu us compute\n", ggml_backend_name(sched->backends[i]), copy_us[i], compute_us[i]);
        }
    }
#endif
}
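
// reset the per-backend allocators and clear the per-node scheduler state so that
// a new graph can be scheduled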
static void sched_reset(ggml_backend_sched_t sched) {
    for (int i = 0; i < sched->n_backends; i++) {
        ggml_tallocr_reset(sched->tallocs[i]);
    }
    // reset state for the next run
    size_t hash_size = sched->hash_set.size;
    memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
    memset(sched->node_talloc,   0, sizeof(sched->node_talloc[0])   * hash_size);
    memset(sched->node_copies,   0, sizeof(sched->node_copies[0])   * hash_size);

    sched->is_reset = true;
}
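
// create a new scheduler for the given backends; bufts may be NULL, in which case
// the default buffer type of each backend is used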
ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size) {
    GGML_ASSERT(n_backends > 0);
    GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS);

    struct ggml_backend_sched * sched = calloc(sizeof(struct ggml_backend_sched), 1);

    // initialize hash table
    sched->hash_set    = ggml_hash_set_new(graph_size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
    sched->node_talloc = calloc(sizeof(sched->node_talloc[0]) * sched->hash_set.size, 1);
    sched->node_copies = calloc(sizeof(sched->node_copies[0]) * sched->hash_set.size, 1);

    sched->n_backends = n_backends;
    for (int i = 0; i < n_backends; i++) {
        sched->backends[i] = backends[i];
        sched->bufts[i] = bufts ? bufts[i] : ggml_backend_get_default_buffer_type(backends[i]);
    }

    sched->galloc = ggml_gallocr_new();

    // init measure allocs for each backend
    for (int i = 0; i < n_backends; i++) {
        sched->tallocs[i] = ggml_tallocr_new_measure_from_buft(sched->bufts[i]);
    }

    sched_reset(sched);

    return sched;
}

void ggml_backend_sched_free(ggml_backend_sched_t sched) {
    if (sched == NULL) {
        return;
    }
    for (int i = 0; i < sched->n_backends; i++) {
        ggml_tallocr_free(sched->tallocs[i]);
    }
    ggml_gallocr_free(sched->galloc);
    ggml_free(sched->ctx);
    free(sched->hash_set.keys);
    free(sched->node_talloc);
    free(sched->node_copies);
    free(sched);
}

void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
    GGML_ASSERT(ggml_tallocr_is_measure(sched->tallocs[0])); // can only be initialized once

    sched_split_graph(sched, measure_graph);
    sched_alloc_splits(sched);

    // allocate buffers and reset allocators
    for (int i = 0; i < sched->n_backends; i++) {
        size_t size = ggml_tallocr_max_size(sched->tallocs[i]);
        ggml_tallocr_free(sched->tallocs[i]);
        sched->tallocs[i] = ggml_tallocr_new_from_buft(sched->bufts[i], size);
    }

    sched_reset(sched);
}

void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);

    if (!sched->is_reset) {
        sched_reset(sched);
    }

    sched_split_graph(sched, graph);
    sched_alloc_splits(sched);
    sched_compute_splits(sched);
}
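
// Typical usage of the scheduler API above (illustrative sketch only, not part of this file;
// it assumes two backends created elsewhere, e.g. with ggml_backend_cuda_init() and
// ggml_backend_cpu_init(), and a caller-provided build_graph() function):
//
//   ggml_backend_t backends[2] = { backend_gpu, backend_cpu };
//   ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 2, GGML_DEFAULT_GRAPH_SIZE);
//
//   // reserve buffers with a worst-case graph (measure step)
//   struct ggml_cgraph * measure_graph = build_graph(max_batch_size);
//   ggml_backend_sched_init_measure(sched, measure_graph);
//
//   // compute graphs; tensors are assigned to backends and the graph is split automatically
//   struct ggml_cgraph * graph = build_graph(batch_size);
//   ggml_backend_sched_graph_compute(sched, graph);
//
//   ggml_backend_sched_free(sched);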

void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
    sched_reset(sched);
}

void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
    sched->callback_eval           = callback;
    sched->callback_eval_user_data = user_data;
}
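
// Example of an eval callback (illustrative sketch; the exact ggml_backend_sched_eval_callback
// typedef is declared in ggml-backend.h). As used in sched_compute_splits above, the callback
// is first called with ask == true to ask whether the node's data is needed, and again with
// ask == false after the node has been computed; returning false from the second call stops
// the computation of the split:
//
//   static bool observe_soft_max(struct ggml_tensor * t, bool ask, void * user_data) {
//       if (ask) {
//           return t->op == GGML_OP_SOFT_MAX; // only request data for softmax nodes
//       }
//       // t has been computed at this point; inspect it (e.g. read it back with
//       // ggml_backend_tensor_get) and return true to continue the computation
//       return true;
//   }
//
//   ggml_backend_sched_set_eval_callback(sched, observe_soft_max, NULL);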

int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
    return sched->n_splits;
}

ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) {
    int backend_index = sched_backend_prio(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
    return sched->tallocs[backend_index];
}

ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) {
    int backend_index = sched_backend_prio(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
    return ggml_tallocr_get_buffer(sched->tallocs[backend_index]);
}

void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
    int backend_index = sched_backend_prio(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
    node_allocr(node) = sched->tallocs[backend_index];
}

ggml_backend_t ggml_backend_sched_get_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
    ggml_tallocr_t allocr = node_allocr(node);
    if (allocr == NULL) {
        return NULL;
    }
    return get_allocr_backend(sched, allocr);
}

// utils

void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    GGML_ASSERT(tensor->buffer == NULL);
    //GGML_ASSERT(tensor->data == NULL); // views of pre-allocated tensors may have the data set in ggml_new_tensor, but still need to be initialized by the backend
    GGML_ASSERT(tensor->view_src != NULL);
    GGML_ASSERT(tensor->view_src->buffer != NULL);
    GGML_ASSERT(tensor->view_src->data != NULL);

    tensor->buffer  = buffer;
    tensor->data    = (char *)tensor->view_src->data + tensor->view_offs;
    tensor->backend = tensor->view_src->backend;
    ggml_backend_buffer_init_tensor(buffer, tensor);
}
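
// place a tensor at a specific address inside an already allocated buffer and let the
// backend initialize it; asserts that the tensor fits within the buffer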
void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) {
    GGML_ASSERT(tensor->buffer == NULL);
    GGML_ASSERT(tensor->data == NULL);
    GGML_ASSERT(tensor->view_src == NULL);
    GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer));
    GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <=
                (char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer));

    tensor->buffer = buffer;
    tensor->data   = addr;
    ggml_backend_buffer_init_tensor(buffer, tensor);
}
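
// recursively duplicate a tensor and its sources into ctx_allocated (tensors that own
// their data) or ctx_unallocated (views), memoizing the copies through the hash set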
static struct ggml_tensor * graph_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies,
    struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) {

    GGML_ASSERT(src != NULL);
    GGML_ASSERT(src->data && "graph must be allocated");

    size_t id = ggml_hash_insert(hash_set, src);
    if (id == GGML_HASHTABLE_ALREADY_EXISTS) {
        return node_copies[ggml_hash_find(hash_set, src)];
    }

    struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src);
    if (src->view_src != NULL) {
        dst->view_src = graph_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
        dst->view_offs = src->view_offs;
    }
    dst->op = src->op;
    memcpy(dst->op_params, src->op_params, sizeof(dst->op_params));
    ggml_set_name(dst, src->name);

    // copy src
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        struct ggml_tensor * s = src->src[i];
        if (s == NULL) {
            break;
        }
        dst->src[i] = graph_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
    }

    node_copies[id] = dst;
    return dst;
}
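
// initialize the duplicated tensors: views are re-created with ggml_backend_view_init,
// other tensors get their data copied from the original graph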
static void graph_init_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) {
    size_t id = ggml_hash_find(hash_set, src);
    if (node_init[id]) {
        return;
    }
    node_init[id] = true;

    struct ggml_tensor * dst = node_copies[id];
    if (dst->view_src != NULL) {
        graph_init_tensor(hash_set, node_copies, node_init, src->view_src);
        ggml_backend_view_init(dst->view_src->buffer, dst);
    }
    else {
        ggml_backend_tensor_copy(src, dst);
    }

    // init src
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        struct ggml_tensor * s = src->src[i];
        if (s == NULL) {
            break;
        }
        graph_init_tensor(hash_set, node_copies, node_init, s);
    }
}
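
// copy a graph and all of its tensors to another backend; returns the buffer holding the
// copies together with the contexts that own the copied tensors and the copied graph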
struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) {
    struct ggml_hash_set hash_set = {
        /* .size = */ graph->visited_hash_table.size,
        /* .keys = */ calloc(sizeof(hash_set.keys[0]) * graph->visited_hash_table.size, 1)
    };
    struct ggml_tensor ** node_copies = calloc(sizeof(node_copies[0]) * hash_set.size, 1);
    bool * node_init = calloc(sizeof(node_init[0]) * hash_set.size, 1);

    struct ggml_init_params params = {
        /* .mem_size   = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true
    };

    struct ggml_context * ctx_allocated   = ggml_init(params);
    struct ggml_context * ctx_unallocated = ggml_init(params);

    if (ctx_allocated == NULL || ctx_unallocated == NULL) {
        fprintf(stderr, "failed to allocate context for graph copy\n");
        free(hash_set.keys);
        free(node_copies);
        free(node_init);
        ggml_free(ctx_allocated);
        ggml_free(ctx_unallocated);
        return (struct ggml_backend_graph_copy) {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
    }

    // dup nodes
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        graph_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
    }

    // allocate nodes
    ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
    if (buffer == NULL) {
        fprintf(stderr, "failed to allocate buffer for graph copy\n");
        free(hash_set.keys);
        free(node_copies);
        free(node_init);
        ggml_free(ctx_allocated);
        ggml_free(ctx_unallocated);
        return (struct ggml_backend_graph_copy) {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
    }

    //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);

    // copy data and init views
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        graph_init_tensor(hash_set, node_copies, node_init, node);
    }

    // build graph copy
    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false);
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct ggml_tensor * node_copy = node_copies[ggml_hash_find(hash_set, node)];
        graph_copy->nodes[i] = node_copy;
    }
    graph_copy->n_nodes = graph->n_nodes;

    free(hash_set.keys);
    free(node_copies);
    free(node_init);

    return (struct ggml_backend_graph_copy) {
        /* .buffer          = */ buffer,
        /* .ctx_allocated   = */ ctx_allocated,
        /* .ctx_unallocated = */ ctx_unallocated,
        /* .graph           = */ graph_copy,
    };
}

void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) {
    ggml_backend_buffer_free(copy.buffer);
    ggml_free(copy.ctx_allocated);
    ggml_free(copy.ctx_unallocated);
}
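
// Example use of ggml_backend_compare_graph_backend below (illustrative sketch; the callback
// signature follows the call site `callback(i, t1, t2, user_data)` and the ggml_backend_eval_callback
// typedef from ggml-backend.h). The callback receives the node index and the two result tensors,
// and returns false to stop the comparison:
//
//   static bool compare_node(int i, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
//       (void) user_data;
//       // compare t1 (computed on backend1) with t2 (computed on backend2), e.g. read both
//       // back with ggml_backend_tensor_get and compute an error metric
//       fprintf(stderr, "node %d: %s\n", i, t1->name);
//       return true; // continue with the next node
//   }
//
//   ggml_backend_compare_graph_backend(backend_cpu, backend_gpu, graph, compare_node, NULL);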

bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
    struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
    if (copy.buffer == NULL) {
        return false;
    }

    struct ggml_cgraph * g1 = graph;
    struct ggml_cgraph * g2 = copy.graph;

    assert(g1->n_nodes == g2->n_nodes);

    for (int i = 0; i < g1->n_nodes; i++) {
        //printf("eval %d/%d\n", i, g1->n_nodes);
        struct ggml_tensor * t1 = g1->nodes[i];
        struct ggml_tensor * t2 = g2->nodes[i];

        assert(t1->op == t2->op && ggml_are_same_layout(t1, t2));

        struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1);
        struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1);

        ggml_backend_graph_compute(backend1, &g1v);
        ggml_backend_graph_compute(backend2, &g2v);

        if (ggml_is_view_op(t1->op)) {
            continue;
        }

        // compare results, calculate rms etc
        if (!callback(i, t1, t2, user_data)) {
            break;
        }
    }

    ggml_backend_graph_copy_free(copy);

    return true;
}