// ggml-alloc.c

#include "ggml-alloc.h"
#include "ggml-backend-impl.h"
#include "ggml.h"
#include "ggml-impl.h"
#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MAX_FREE_BLOCKS 256

//#define GGML_ALLOCATOR_DEBUG

//#define AT_PRINTF(...) GGML_LOG_DEBUG(__VA_ARGS__)
#define AT_PRINTF(...)

static bool ggml_is_view(const struct ggml_tensor * t) {
    return t->view_src != NULL;
}

// ops that return true for this function must not use restrict pointers for their backend implementations
bool ggml_op_can_inplace(enum ggml_op op) {
    switch (op) {
        case GGML_OP_SCALE:
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ADD:
        case GGML_OP_ADD_ID:
        case GGML_OP_ADD1:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_UNARY:
        case GGML_OP_ROPE:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_SILU_BACK:
        case GGML_OP_RMS_NORM:
        case GGML_OP_RMS_NORM_BACK:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_SOFT_MAX_BACK:
            return true;

        default:
            return false;
    }
}

static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
    assert(alignment && !(alignment & (alignment - 1))); // power of 2
    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
    return offset + align;
}
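
// Illustrative note (added commentary, not part of the upstream sources): for a buffer
// base of 0x1000, offset 10 and alignment 16, (0x1000 + 10) % 16 == 10, so align == 6 and
// the function returns 16, i.e. the next offset at which base + offset is 16-byte aligned.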

// tallocr

struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) {
    void * base = ggml_backend_buffer_get_base(buffer);
    size_t align = ggml_backend_buffer_get_alignment(buffer);

    assert(align && !(align & (align - 1))); // power of 2

    struct ggml_tallocr talloc = (struct ggml_tallocr) {
        /*.buffer    = */ buffer,
        /*.base      = */ base,
        /*.alignment = */ align,
        /*.offset    = */ aligned_offset(base, 0, align),
    };
    return talloc;
}

enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) {
    size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
    size = GGML_PAD(size, talloc->alignment);

    if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) {
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
                __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
        GGML_ABORT("not enough space in the buffer");
    }

    void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
    talloc->offset += size;

    assert(((uintptr_t)addr % talloc->alignment) == 0);

    return ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
}
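
// Usage sketch (illustrative only, not part of this file): ggml_tallocr is a simple
// linear ("bump") allocator over a single pre-allocated backend buffer. A caller that
// already knows the total size it needs typically does something like:
//
//     ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, total_size);
//     struct ggml_tallocr   tal = ggml_tallocr_new(buf);
//     for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
//         ggml_tallocr_alloc(&tal, t); // places tensors back to back, padded to the buffer alignment
//     }
//
// where `buft`, `total_size` and `ctx` are assumed to be set up by the caller.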

// dynamic tensor allocator

#define GGML_VBUFFER_MAX_CHUNKS 16

// relative memory address within an allocation that can be split into multiple buffers (chunks)
struct buffer_address {
    int    chunk;  // index of a backend buffer
    size_t offset; // local memory offset within the buffer
};

static const struct buffer_address GGML_BUFFER_ADDRESS_INVALID = { -1, SIZE_MAX };

static bool ggml_buffer_address_less(struct buffer_address a, struct buffer_address b) {
    return a.chunk != b.chunk ? a.chunk < b.chunk : a.offset < b.offset;
}

struct free_block {
    size_t offset;
    size_t size;
};

struct tallocr_chunk {
    struct free_block free_blocks[MAX_FREE_BLOCKS];
    int               n_free_blocks;
    size_t            max_size;
};

struct ggml_dyn_tallocr {
    size_t                 alignment;
    size_t                 max_chunk_size;
    struct tallocr_chunk * chunks[GGML_VBUFFER_MAX_CHUNKS];
    int                    n_chunks;

#ifdef GGML_ALLOCATOR_DEBUG
    struct {
        const struct ggml_tensor * tensor;
        struct buffer_address      addr;
    } allocated_tensors[1024];
#endif
};
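
// Descriptive note (added commentary, not upstream documentation): ggml_dyn_tallocr does
// not own any real memory. It only simulates allocations by keeping a sorted free list per
// chunk and recording each chunk's high-water mark (max_size). The graph allocator below
// first plans all allocations with this structure, then allocates real backend buffers of
// exactly max_size per chunk and replays the recorded (chunk, offset) addresses.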

static void ggml_dyn_tallocr_insert_block(struct tallocr_chunk * chunk, size_t offset, size_t size) {
    GGML_ASSERT(chunk->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
    int insert_pos = 0;
    while (insert_pos < chunk->n_free_blocks && chunk->free_blocks[insert_pos].offset < offset) {
        insert_pos++;
    }
    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = chunk->n_free_blocks; i > insert_pos; i--) {
        chunk->free_blocks[i] = chunk->free_blocks[i-1];
    }
    // insert the new block
    chunk->free_blocks[insert_pos].offset = offset;
    chunk->free_blocks[insert_pos].size   = size;
    chunk->n_free_blocks++;
}

static void ggml_dyn_tallocr_remove_block(struct tallocr_chunk * chunk, int idx) {
    // shift all elements after idx by 1 to the left, overwriting the element at idx
    for (int i = idx; i < chunk->n_free_blocks; i++) {
        chunk->free_blocks[i] = chunk->free_blocks[i+1];
    }
    chunk->n_free_blocks--;
}

static int ggml_dyn_tallocr_new_chunk(struct ggml_dyn_tallocr * alloc, size_t min_size) {
    if (alloc->n_chunks >= GGML_VBUFFER_MAX_CHUNKS) {
        return -1;
    }
    struct tallocr_chunk * chunk = calloc(1, sizeof(struct tallocr_chunk));
    chunk->n_free_blocks = 1;
    chunk->free_blocks[0].offset = 0;
    // available space in a chunk is limited to max_chunk_size, but can be higher if:
    // 1. a single tensor exceeds the maximum, and cannot fit any other way
    // 2. we are running out of chunks
    // backends will either manage to allocate the larger size, or report an error.
    chunk->free_blocks[0].size = MAX(min_size, alloc->max_chunk_size);
    if (alloc->n_chunks == GGML_VBUFFER_MAX_CHUNKS - 1) {
        chunk->free_blocks[0].size = SIZE_MAX/2;
    }
    alloc->chunks[alloc->n_chunks] = chunk;
    alloc->n_chunks++;
    return alloc->n_chunks - 1;
}

#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].tensor == NULL) {
            alloc->allocated_tensors[i].tensor = tensor;
            alloc->allocated_tensors[i].addr = addr;
            return;
        }
    }
    GGML_ABORT("out of allocated_tensors");
}

static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].addr.chunk == addr.chunk && alloc->allocated_tensors[i].addr.offset == addr.offset) {
            alloc->allocated_tensors[i].tensor = NULL;
            return;
        }
    }
    GGML_ABORT("tried to free tensor %s that was not found\n", tensor->name);
}
#endif
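
// Note (added commentary, not upstream documentation): allocation below is best-fit.
// 1. search every chunk for the smallest free block (excluding each chunk's last block) that fits;
// 2. otherwise fall back to the last free block of some chunk, which grows that chunk's
//    planned size, i.e. its max_size high-water mark (see ggml_dyn_tallocr_new_chunk);
// 3. otherwise open a new chunk sized for the request.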

static struct buffer_address ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) {
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);

    int best_fit_chunk = -1;
    int best_fit_block = -1;
    size_t max_avail = 0;

    // find the best fitting free block besides the last block, within any chunk
    for (int c = 0; c < alloc->n_chunks; ++c) {
        struct tallocr_chunk * chunk = alloc->chunks[c];
        size_t best_fit_size = SIZE_MAX;
        for (int i = 0; i < chunk->n_free_blocks - 1; i++) {
            struct free_block * block = &chunk->free_blocks[i];
            max_avail = MAX(max_avail, block->size);
            if (block->size >= size && block->size <= best_fit_size) {
                best_fit_chunk = c;
                best_fit_block = i;
                best_fit_size  = block->size;
            }
        }
    }

    if (best_fit_block == -1) {
        // no suitable block found, try the last block (this will grow a chunk's size)
        for (int c = 0; c < alloc->n_chunks; ++c) {
            struct tallocr_chunk * chunk = alloc->chunks[c];
            if (chunk->n_free_blocks > 0) {
                struct free_block * block = &chunk->free_blocks[chunk->n_free_blocks - 1];
                max_avail = MAX(max_avail, block->size);
                if (block->size >= size) {
                    best_fit_chunk = c;
                    best_fit_block = chunk->n_free_blocks - 1;
                    break;
                }
            }
        }
    }

    if (best_fit_block == -1) {
        // none of the existing chunks have enough space left
        best_fit_chunk = ggml_dyn_tallocr_new_chunk(alloc, size);
        best_fit_block = 0;
    }
    if (best_fit_chunk == -1) {
        // since the last chunk always has virtually endless memory, this should never happen
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
                __func__, size, max_avail);
        GGML_ABORT("graph allocation: failed to reserve memory");
    }

    struct tallocr_chunk * chunk = alloc->chunks[best_fit_chunk];
    struct free_block * block = &chunk->free_blocks[best_fit_block];
    struct buffer_address addr = { .chunk = best_fit_chunk, .offset = block->offset };
    block->offset += size;
    block->size   -= size;
    if (block->size == 0) {
        // remove block if empty
        ggml_dyn_tallocr_remove_block(chunk, best_fit_block);
    }

    AT_PRINTF("block %d, offset %zu, chunk %d\n", best_fit_block, addr.offset, addr.chunk);

#ifdef GGML_ALLOCATOR_DEBUG
    add_allocated_tensor(alloc, addr, tensor);
    size_t cur_max = addr.offset + size;
    if (cur_max > alloc->chunks[addr.chunk]->max_size) {
        // sort allocated_tensors by chunk/offset
        for (int i = 0; i < 1024; i++) {
            for (int j = i + 1; j < 1024; j++) {
                if (ggml_buffer_address_less(alloc->allocated_tensors[j].addr, alloc->allocated_tensors[i].addr)) {
                    const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
                    struct buffer_address tmp_addr = alloc->allocated_tensors[i].addr;
                    alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
                    alloc->allocated_tensors[i].addr = alloc->allocated_tensors[j].addr;
                    alloc->allocated_tensors[j].tensor = tmp_tensor;
                    alloc->allocated_tensors[j].addr = tmp_addr;
                }
            }
        }
        GGML_LOG_DEBUG("max_size[%d] = %.2f MB: tensors: ", addr.chunk, cur_max / 1024.0 / 1024.0);
        for (int i = 0; i < 1024; i++) {
            if (alloc->allocated_tensors[i].tensor) {
                GGML_LOG_DEBUG("%s [%d: %zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
                    alloc->allocated_tensors[i].addr.chunk,
                    alloc->allocated_tensors[i].addr.offset,
                    alloc->allocated_tensors[i].addr.offset + ggml_nbytes(alloc->allocated_tensors[i].tensor),
                    ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
            }
        }
        GGML_LOG_DEBUG("\n");
    }
#endif

    chunk->max_size = MAX(chunk->max_size, addr.offset + size);

    return addr;

    GGML_UNUSED(tensor);
}

// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, size_t size, const struct ggml_tensor * tensor) {
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: freeing %s at {chunk=%d, offset=%zu} (%zu bytes) - n_free_blocks = %d\n",
        __func__, tensor->name, addr.chunk, addr.offset, size, alloc->chunks[addr.chunk]->n_free_blocks);

#ifdef GGML_ALLOCATOR_DEBUG
    remove_allocated_tensor(alloc, addr, tensor);
#endif

    struct tallocr_chunk * chunk = alloc->chunks[addr.chunk];

    // see if we can merge with an existing block
    for (int i = 0; i < chunk->n_free_blocks; i++) {
        struct free_block * block = &chunk->free_blocks[i];
        // check if ptr is at the end of the block
        if (block->offset + block->size == addr.offset) {
            block->size += size;
            // check if we can merge with the next block
            if (i < chunk->n_free_blocks - 1) {
                struct free_block * next = &chunk->free_blocks[i+1];
                if (block->offset + block->size == next->offset) {
                    block->size += next->size;
                    ggml_dyn_tallocr_remove_block(chunk, i+1);
                }
            }
            return;
        }
        // check if ptr is at the beginning of the block
        if (addr.offset + size == block->offset) {
            block->offset = addr.offset;
            block->size += size;
            // check if we can merge with the previous block
            if (i > 0) {
                struct free_block * prev = &chunk->free_blocks[i-1];
                if (prev->offset + prev->size == block->offset) {
                    prev->size += block->size;
                    ggml_dyn_tallocr_remove_block(chunk, i);
                }
            }
            return;
        }
    }
    // otherwise, add a new block
    ggml_dyn_tallocr_insert_block(chunk, addr.offset, size);

    GGML_UNUSED(tensor);
}

static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; i++) {
        free(alloc->chunks[i]);
        alloc->chunks[i] = NULL;
    }
    alloc->n_chunks = 0;

#ifdef GGML_ALLOCATOR_DEBUG
    for (int i = 0; i < 1024; i++) {
        alloc->allocated_tensors[i].tensor = NULL;
    }
#endif
}

static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment, size_t max_buffer_size) {
    struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr));

    *alloc = (struct ggml_dyn_tallocr) {
        /*.alignment      = */ alignment,
        /*.max_chunk_size = */ MIN(max_buffer_size, SIZE_MAX/2), // clamp to avoid overflows
        /*.chunks         = */ {NULL},
        /*.n_chunks       = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {{0}},
#endif
    };

    ggml_dyn_tallocr_reset(alloc);

    return alloc;
}

static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < alloc->n_chunks; ++i) {
        free(alloc->chunks[i]);
    }
    free(alloc);
}

static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc) {
    size_t max_size = 0;
    for (int i = 0; i < alloc->n_chunks; i++) {
        max_size += alloc->chunks[i]->max_size;
    }
    return max_size;
}

// virtual buffer with contiguous memory range, split into multiple backend buffers (chunks)
struct vbuffer {
    ggml_backend_buffer_t chunks[GGML_VBUFFER_MAX_CHUNKS];
};

static void ggml_vbuffer_free(struct vbuffer * buf) {
    if (buf == NULL) {
        return;
    }
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; ++i) {
        ggml_backend_buffer_free(buf->chunks[i]);
    }
    free(buf);
}

static int ggml_vbuffer_n_chunks(struct vbuffer * buf) {
    int n = 0;
    while (n < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[n]) n++;
    return n;
}

static size_t ggml_vbuffer_size(struct vbuffer * buf) {
    size_t size = 0;
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        size += ggml_backend_buffer_get_size(buf->chunks[i]);
    }
    return size;
}

static struct vbuffer * ggml_vbuffer_alloc(ggml_backend_buffer_type_t buft, const struct ggml_dyn_tallocr * talloc, enum ggml_backend_buffer_usage usage) {
    struct vbuffer * buf = (struct vbuffer *)calloc(1, sizeof(struct vbuffer));
    if (buf == NULL) {
        return NULL;
    }

    for (int n = 0; n < talloc->n_chunks; n++) {
        size_t chunk_size = talloc->chunks[n]->max_size;
        buf->chunks[n] = ggml_backend_buft_alloc_buffer(buft, chunk_size);
        if (buf->chunks[n] == NULL) {
            ggml_vbuffer_free(buf);
            return NULL;
        }
        ggml_backend_buffer_set_usage(buf->chunks[n], usage);
    }
    return buf;
}

static void ggml_vbuffer_tensor_alloc(struct vbuffer * buf, struct ggml_tensor * tensor, struct buffer_address buf_addr) {
    void * base = ggml_backend_buffer_get_base(buf->chunks[buf_addr.chunk]);
    void * addr = (char *)base + buf_addr.offset;
    ggml_backend_tensor_alloc(buf->chunks[buf_addr.chunk], tensor, addr);
}

static void ggml_vbuffer_reset(struct vbuffer * buf) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        ggml_backend_buffer_reset(buf->chunks[i]);
    }
}

/////////////////////////////////////

// graph allocator

struct hash_node {
    int n_children;
    int n_views;
    int buffer_id;
    struct buffer_address addr;
    bool allocated;
};

struct tensor_alloc {
    int buffer_id;
    struct buffer_address addr;
    size_t size_max; // 0 = pre-allocated, unused, or view
};

struct leaf_alloc {
    struct tensor_alloc leaf;
};

struct node_alloc {
    struct tensor_alloc dst;
    struct tensor_alloc src[GGML_MAX_SRC];
};

struct ggml_gallocr {
    ggml_backend_buffer_type_t * bufts;       // [n_buffers]
    struct vbuffer ** buffers;                // [n_buffers]
    struct ggml_dyn_tallocr ** buf_tallocs;   // [n_buffers]
    int n_buffers;

    struct ggml_hash_set hash_set;
    struct hash_node * hash_values;           // [hash_set.size]

    struct node_alloc * node_allocs;          // [n_nodes]
    int n_nodes;

    struct leaf_alloc * leaf_allocs;          // [n_leafs]
    int n_leafs;
};
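
// Descriptive note (added commentary): hash_node holds the per-tensor state used while
// planning a graph (reference counts and the planned buffer/chunk/offset address), while
// node_alloc and leaf_alloc persist the resulting assignments so that
// ggml_gallocr_alloc_graph() can replay them on later graphs without re-planning.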

ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) {
    ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr));
    GGML_ASSERT(galloc != NULL);

    galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t));
    GGML_ASSERT(galloc->bufts != NULL);

    galloc->buffers = calloc(n_bufs, sizeof(struct vbuffer *));
    GGML_ASSERT(galloc->buffers != NULL);

    galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *));
    GGML_ASSERT(galloc->buf_tallocs != NULL);

    for (int i = 0; i < n_bufs; i++) {
        galloc->bufts[i] = bufts[i];
        galloc->buffers[i] = NULL;

        // check if the same buffer type is used multiple times and reuse the same allocator
        for (int j = 0; j < i; j++) {
            if (bufts[i] == bufts[j]) {
                galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
                break;
            }
        }

        if (galloc->buf_tallocs[i] == NULL) {
            size_t alignment = ggml_backend_buft_get_alignment(bufts[i]);
            size_t max_size  = ggml_backend_buft_get_max_size(bufts[i]);
            galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment, max_size);
        }
    }
    galloc->n_buffers = n_bufs;

    return galloc;
}

ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) {
    return ggml_gallocr_new_n(&buft, 1);
}
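
// Usage sketch (illustrative only; `backend`, `worst_case_graph` and `graph` are assumed
// to be provided by the caller): the typical lifecycle of a graph allocator for a single
// backend looks like
//
//     ggml_gallocr_t galloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
//     ggml_gallocr_reserve(galloc, worst_case_graph); // optional: size the buffers up front
//     ggml_gallocr_alloc_graph(galloc, graph);        // assign tensor addresses before each compute
//     ggml_backend_graph_compute(backend, graph);
//     ggml_gallocr_free(galloc);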

void ggml_gallocr_free(ggml_gallocr_t galloc) {
    if (galloc == NULL) {
        return;
    }

    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buffers[j] == galloc->buffers[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_vbuffer_free(galloc->buffers[i]);
            }
        }
        if (galloc->buf_tallocs != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
            }
        }
    }

    ggml_hash_set_free(&galloc->hash_set);
    free(galloc->hash_values);
    free(galloc->bufts);
    free(galloc->buffers);
    free(galloc->buf_tallocs);
    free(galloc->node_allocs);
    free(galloc->leaf_allocs);
    free(galloc);
}

typedef struct ggml_gallocr * ggml_gallocr_t;

static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    size_t i = ggml_hash_find_or_insert(&galloc->hash_set, t);
    return &galloc->hash_values[i];
}

static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return ggml_gallocr_hash_get(galloc, t)->allocated;
}

static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated;
}

static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0);
    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);

    if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) {
        hn->allocated = true;
        assert(hn->addr.offset == 0);

        // try to reuse a parent's buffer (inplace)
        if (ggml_op_can_inplace(node->op)) {
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                struct ggml_tensor * parent = node->src[i];
                if (parent == NULL) {
                    continue;
                }

                // if the node's data is external, then we cannot re-use it
                if (!ggml_gallocr_is_own(galloc, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
                    continue;
                }

                // outputs cannot be reused
                if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) {
                    AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
                    continue;
                }

                if (!ggml_are_same_layout(node, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
                    continue;
                }

                struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
                if (p_hn->n_children == 1 && p_hn->n_views == 0) {
                    if (ggml_is_view(parent)) {
                        struct ggml_tensor * view_src = parent->view_src;
                        struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                        if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
                            AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
                            assert(view_src_hn->addr.chunk == p_hn->addr.chunk && view_src_hn->addr.offset == p_hn->addr.offset);
                            hn->buffer_id = p_hn->buffer_id;
                            hn->addr = p_hn->addr;
                            p_hn->allocated = false; // avoid freeing the parent
                            view_src_hn->allocated = false;
                            return;
                        }
                    } else {
                        AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                        hn->buffer_id = p_hn->buffer_id;
                        hn->addr = p_hn->addr;
                        p_hn->allocated = false; // avoid freeing the parent
                        return;
                    }
                }
            }
        }

        // allocate tensor from the buffer
        struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
        ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
        size_t size = ggml_backend_buft_get_alloc_size(buft, node);
        hn->buffer_id = buffer_id;
        hn->addr = ggml_dyn_tallocr_alloc(alloc, size, node);
    }
}

static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
    // graph outputs are never freed
    if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
        AT_PRINTF("not freeing output %s\n", node->name);
        return;
    }

    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
    int buffer_id = hn->buffer_id;
    struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
    ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
    size_t size = ggml_backend_buft_get_alloc_size(buft, node);
    ggml_dyn_tallocr_free_tensor(alloc, hn->addr, size, node);
    hn->allocated = false;
}

static int get_node_buffer_id(const int * node_buffer_ids, int i) {
    return node_buffer_ids ? node_buffer_ids[i] : 0;
}

static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    // clear hash tables
    ggml_hash_set_reset(&galloc->hash_set);
    memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);

    // allocate leafs
    // these may be tensors that are not used in the graph, but that the application may still want allocated for other purposes
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
    }

    // count number of children and views
    // allocate other graph inputs and leafs first to avoid overwriting them
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];

        // TODO: better way to add external dependencies
        // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
        // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
        // itself is never used and should not be considered a dependency
        if (ggml_is_view(node) && node->op != GGML_OP_NONE) {
            struct ggml_tensor * view_src = node->view_src;
            ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
        }

        if (node->flags & GGML_TENSOR_FLAG_INPUT) {
            ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }

            ggml_gallocr_hash_get(galloc, src)->n_children += 1;

            // allocate explicit inputs
            if (src->flags & GGML_TENSOR_FLAG_INPUT) {
                ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
            }
        }
    }

    // allocate tensors
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int buffer_id = get_node_buffer_id(node_buffer_ids, i);

        // allocate parents (only leafs need to be allocated at this point)
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            ggml_gallocr_allocate_node(galloc, parent, buffer_id);
        }

        // allocate node
        ggml_gallocr_allocate_node(galloc, node, buffer_id);

        AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name);
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            AT_PRINTF("%s", parent->name);
            if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
                AT_PRINTF(", ");
            }
        }
        AT_PRINTF("\n");

        // update parents
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
            p_hn->n_children -= 1;

            AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
                parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);

            if (p_hn->n_children == 0 && p_hn->n_views == 0) {
                if (ggml_is_view(parent)) {
                    struct ggml_tensor * view_src = parent->view_src;
                    struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                    view_src_hn->n_views -= 1;
                    AT_PRINTF("view_src %s: %d children, %d views\n",
                        view_src->name, view_src_hn->n_children, view_src_hn->n_views);
                    if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
                        ggml_gallocr_free_node(galloc, view_src);
                    }
                } else if (p_hn->allocated) {
                    ggml_gallocr_free_node(galloc, parent);
                }
            }
            AT_PRINTF("\n");
        }
    }
}
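
// Note (added commentary): the planning pass above is effectively reference counting.
// Every tensor starts with n_children/n_views counts; after a node is "executed" during
// planning, each parent's count is decremented and, once a parent has no remaining
// children or views, its address range is returned to the dynamic allocator so that a
// later node in the same graph can reuse the same memory.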

bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    size_t min_hash_size = graph->n_nodes + graph->n_leafs;
    // add 25% margin to avoid hash collisions
    min_hash_size += min_hash_size / 4;

    // initialize hash table
    if (galloc->hash_set.size < min_hash_size) {
        ggml_hash_set_free(&galloc->hash_set);
        galloc->hash_set = ggml_hash_set_new(min_hash_size);
        GGML_ASSERT(galloc->hash_set.keys != NULL);

        free(galloc->hash_values);
        galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
        GGML_ASSERT(galloc->hash_values != NULL);
    }

    // reset allocators
    for (int i = 0; i < galloc->n_buffers; i++) {
        ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
    }

    // allocate in hash table
    ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);

    // set the node_allocs from the hash table
    if (galloc->n_nodes < graph->n_nodes) {
        free(galloc->node_allocs);
        galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
        GGML_ASSERT(galloc->node_allocs != NULL);
    }
    galloc->n_nodes = graph->n_nodes;
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        if (node->view_src || node->data) {
            node_alloc->dst.buffer_id = -1;
            node_alloc->dst.addr = GGML_BUFFER_ADDRESS_INVALID;
            node_alloc->dst.size_max = 0;
        } else {
            struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
            node_alloc->dst.buffer_id = hn->buffer_id;
            node_alloc->dst.addr = hn->addr;
            node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (!src || src->view_src || src->data) {
                node_alloc->src[j].buffer_id = -1;
                node_alloc->src[j].addr = GGML_BUFFER_ADDRESS_INVALID;
                node_alloc->src[j].size_max = 0;
            } else {
                struct hash_node * hn = ggml_gallocr_hash_get(galloc, src);
                node_alloc->src[j].buffer_id = hn->buffer_id;
                node_alloc->src[j].addr = hn->addr;
                node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
            }
        }
    }
    if (galloc->n_leafs < graph->n_leafs) {
        free(galloc->leaf_allocs);
        galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
        GGML_ASSERT(galloc->leaf_allocs != NULL);
    }
    galloc->n_leafs = graph->n_leafs;
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
        if (leaf->view_src || leaf->data) {
            galloc->leaf_allocs[i].leaf.buffer_id = -1;
            galloc->leaf_allocs[i].leaf.addr = GGML_BUFFER_ADDRESS_INVALID;
            galloc->leaf_allocs[i].leaf.size_max = 0;
        } else {
            galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
            galloc->leaf_allocs[i].leaf.addr = hn->addr;
            galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
        }
    }

    // reallocate buffers if needed
    for (int i = 0; i < galloc->n_buffers; i++) {
        // if the buffer type is used multiple times, we reuse the same buffer
        for (int j = 0; j < i; j++) {
            if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                galloc->buffers[i] = galloc->buffers[j];
                break;
            }
        }

        size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0;
        size_t new_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);

        // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
        if (new_size > cur_size || galloc->buffers[i] == NULL) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
#endif

            ggml_vbuffer_free(galloc->buffers[i]);
            galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
            if (galloc->buffers[i] == NULL) {
                GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
                return false;
            }
        }
    }

    return true;
}
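
// Note (added commentary): ggml_gallocr_reserve()/reserve_n() both plan a graph and
// (re)allocate the backing buffers, typically with a worst-case graph. A later call to
// ggml_gallocr_alloc_graph() only replays the stored assignments and falls back to a
// full reserve when the graph no longer fits (single-buffer case only).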

bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
}

static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
    int buffer_id = tensor_alloc->buffer_id;
    assert(tensor->data || tensor->view_src || ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);

    if (tensor->view_src != NULL) {
        if (tensor->buffer == NULL) {
            assert(tensor_alloc->addr.offset == SIZE_MAX);
            if (tensor->view_src->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
            ggml_backend_view_init(tensor);
        }
    } else {
        if (tensor->data == NULL) {
            assert(tensor_alloc->addr.offset != SIZE_MAX);
            assert(ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);
            ggml_vbuffer_tensor_alloc(galloc->buffers[buffer_id], tensor, tensor_alloc->addr);
        } else {
            if (tensor->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
        }
    }
}

static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) {
    size_t node_size = 0;
    if (!node->data && !node->view_src) {
        // if we previously had data but don't now, then reallocate
        if (talloc->buffer_id < 0) {
            return false;
        }
        node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
    }
    return talloc->size_max >= node_size;
}

static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (galloc->n_nodes != graph->n_nodes) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
#endif
        return true;
    }

    if (galloc->n_leafs != graph->n_leafs) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
#endif
        return true;
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];

        if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
#endif
            return true;
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
#ifndef NDEBUG
                GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
#endif
                return true;
            }
        }
    }

    return false;
}

bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (ggml_gallocr_needs_realloc(galloc, graph)) {
        if (galloc->n_buffers == 1) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
#endif
            if (!ggml_gallocr_reserve(galloc, graph)) {
                return false;
            }
        } else {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
#endif
            return false;
        }
    }

    // reset buffers
    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers[i] != NULL) {
            ggml_vbuffer_reset(galloc->buffers[i]);
        }
    }

    // allocate the graph tensors from the previous assignments
    // leafs
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
        ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
    }
    // nodes
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
        }
        ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
    }

    return true;
}

size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);

    if (galloc->buffers[buffer_id] == NULL) {
        return 0;
    }

    for (int i = 0; i < buffer_id; i++) {
        if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
            // this buffer is the same as a previous one due to the same buffer type being used multiple times
            // only return the buffer size the first time it appears to avoid double counting
            return 0;
        }
    }

    return ggml_vbuffer_size(galloc->buffers[buffer_id]);
}

// utils

static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) {
    for (size_t i = 0; i < *n_buffers; i++) {
        ggml_backend_buffer_free((*buffers)[i]);
    }
    free(*buffers);
}

static bool alloc_tensor_range(struct ggml_context * ctx,
        struct ggml_tensor * first, struct ggml_tensor * last,
        ggml_backend_buffer_type_t buft, size_t size,
        ggml_backend_buffer_t ** buffers, size_t * n_buffers) {

    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
    if (buffer == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size);
        free_buffers(buffers, n_buffers);
        return false;
    }

    *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1));
    (*buffers)[(*n_buffers)++] = buffer;

    struct ggml_tallocr tallocr = ggml_tallocr_new(buffer);

    for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) {
        enum ggml_status status = GGML_STATUS_SUCCESS;
        if (t->data == NULL) {
            if (t->view_src == NULL) {
                status = ggml_tallocr_alloc(&tallocr, t);
            } else if (t->buffer == NULL) {
                status = ggml_backend_view_init(t);
            }
        } else {
            if (t->view_src != NULL && t->buffer == NULL) {
                // view of a pre-allocated tensor
                status = ggml_backend_view_init(t);
            }
        }
        if (status != GGML_STATUS_SUCCESS) {
            GGML_LOG_ERROR("%s: failed to initialize tensor %s\n", __func__, t->name);
            free_buffers(buffers, n_buffers);
            return false;
        }
    }

    return true;
}

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(ggml_get_no_alloc(ctx) == true);

    size_t alignment = ggml_backend_buft_get_alignment(buft);
    size_t max_size  = ggml_backend_buft_get_max_size(buft);

    ggml_backend_buffer_t * buffers = NULL;
    size_t n_buffers = 0;

    size_t cur_buf_size = 0;
    struct ggml_tensor * first = ggml_get_first_tensor(ctx);
    for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        size_t this_size = 0;
        if (t->data == NULL && t->view_src == NULL) {
            this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
        }

        if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) {
            // allocate tensors in the current buffer
            if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
                return NULL;
            }
            first = t;
            cur_buf_size = this_size;
        } else {
            cur_buf_size += this_size;
        }
    }

    // allocate remaining tensors
    if (cur_buf_size > 0) {
        if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
            return NULL;
        }
    }

    if (n_buffers == 0) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
#endif
        return NULL;
    }

    ggml_backend_buffer_t buffer;
    if (n_buffers == 1) {
        buffer = buffers[0];
    } else {
        buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
    }
    free(buffers);
    return buffer;
}

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) {
    return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend));
}
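
// Usage sketch (illustrative only; `n_tensors`, `ctx` and `backend` are assumptions, not
// part of this file): a context created with .no_alloc = true can have all of its tensors
// placed into a single backend buffer:
//
//     struct ggml_init_params params = {
//         /*.mem_size   =*/ ggml_tensor_overhead() * n_tensors,
//         /*.mem_buffer =*/ NULL,
//         /*.no_alloc   =*/ true,
//     };
//     struct ggml_context * ctx = ggml_init(params);
//     // ... create tensors in ctx ...
//     ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//
// The returned buffer must be freed with ggml_backend_buffer_free() once the context's
// tensors are no longer needed.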