// ggml-alloc.c

#include "ggml-alloc.h"
#include "ggml-backend-impl.h"
#include "ggml.h"
#include "ggml-impl.h"
#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MAX_FREE_BLOCKS 256

//#define GGML_ALLOCATOR_DEBUG

//#define AT_PRINTF(...) GGML_LOG_DEBUG(__VA_ARGS__)
#define AT_PRINTF(...)

static bool ggml_is_view(const struct ggml_tensor * t) {
    return t->view_src != NULL;
}

// ops that return true for this function must not use restrict pointers for their backend implementations
bool ggml_op_can_inplace(enum ggml_op op) {
    switch (op) {
        case GGML_OP_SCALE:
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ADD:
        case GGML_OP_ADD_ID:
        case GGML_OP_ADD1:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_UNARY:
        case GGML_OP_ROPE:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_SILU_BACK:
        case GGML_OP_RMS_NORM:
        case GGML_OP_RMS_NORM_BACK:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_SOFT_MAX_BACK:
            return true;

        default:
            return false;
    }
}
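
// note (illustrative): when the graph allocator reuses a parent's memory for one of the ops
// above (see ggml_gallocr_allocate_node), the destination tensor ends up aliasing its source,
// e.g. dst->data == src0->data for an in-place GGML_OP_ADD, so a backend kernel that marked
// its pointers `restrict` would rely on undefined behavior.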

static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
    assert(alignment && !(alignment & (alignment - 1))); // power of 2
    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
    return offset + align;
}
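
// worked example (illustrative): with buffer == NULL, offset == 10 and alignment == 16,
// align = (16 - (10 % 16)) % 16 = 6 and the function returns 16, i.e. the next
// 16-byte-aligned offset at or after the requested one.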

// tallocr

struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) {
    void * base = ggml_backend_buffer_get_base(buffer);
    size_t align = ggml_backend_buffer_get_alignment(buffer);

    assert(align && !(align & (align - 1))); // power of 2

    struct ggml_tallocr talloc = (struct ggml_tallocr) {
        /*.buffer    = */ buffer,
        /*.base      = */ base,
        /*.alignment = */ align,
        /*.offset    = */ aligned_offset(base, 0, align),
    };
    return talloc;
}

enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) {
    size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
    size = GGML_PAD(size, talloc->alignment);

    if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) {
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
                __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
        GGML_ABORT("not enough space in the buffer");
    }

    void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
    talloc->offset += size;

    assert(((uintptr_t)addr % talloc->alignment) == 0);

    return ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
}
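
// usage sketch (illustrative, not part of this file): the tallocr is a simple linear
// allocator over a single pre-allocated backend buffer. assuming a buffer type `buft`
// and tensors t0, t1 created in a no_alloc ggml context:
//
//     ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, buffer_size);
//     struct ggml_tallocr talloc = ggml_tallocr_new(buffer);
//     ggml_tallocr_alloc(&talloc, t0);
//     ggml_tallocr_alloc(&talloc, t1);
//
// allocations only move the offset forward; there is no per-tensor free.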

// dynamic tensor allocator

#define GGML_VBUFFER_MAX_CHUNKS 16

// relative memory address within an allocation that can be split into multiple buffers (chunks)
struct buffer_address {
    int    chunk;  // index of a backend buffer
    size_t offset; // local memory offset within the buffer
};

static const struct buffer_address GGML_BUFFER_ADDRESS_INVALID = { -1, SIZE_MAX };

static bool ggml_buffer_address_less(struct buffer_address a, struct buffer_address b) {
    return a.chunk != b.chunk ? a.chunk < b.chunk : a.offset < b.offset;
}

struct free_block {
    size_t offset;
    size_t size;
};

struct tallocr_chunk {
    struct free_block free_blocks[MAX_FREE_BLOCKS];
    int n_free_blocks;
    size_t max_size;
};

struct ggml_dyn_tallocr {
    size_t alignment;
    size_t max_chunk_size;
    struct tallocr_chunk * chunks[GGML_VBUFFER_MAX_CHUNKS];
    int n_chunks;

#ifdef GGML_ALLOCATOR_DEBUG
    struct {
        const struct ggml_tensor * tensor;
        struct buffer_address addr;
    } allocated_tensors[1024];
#endif
};

static void ggml_dyn_tallocr_insert_block(struct tallocr_chunk * chunk, size_t offset, size_t size) {
    GGML_ASSERT(chunk->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
    int insert_pos = 0;
    while (insert_pos < chunk->n_free_blocks && chunk->free_blocks[insert_pos].offset < offset) {
        insert_pos++;
    }
    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = chunk->n_free_blocks; i > insert_pos; i--) {
        chunk->free_blocks[i] = chunk->free_blocks[i-1];
    }
    // insert the new block
    chunk->free_blocks[insert_pos].offset = offset;
    chunk->free_blocks[insert_pos].size = size;
    chunk->n_free_blocks++;
}

static void ggml_dyn_tallocr_remove_block(struct tallocr_chunk * chunk, int idx) {
    // shift all elements after idx by 1 to the left, overwriting the element at idx
    for (int i = idx; i < chunk->n_free_blocks; i++) {
        chunk->free_blocks[i] = chunk->free_blocks[i+1];
    }
    chunk->n_free_blocks--;
}

static int ggml_dyn_tallocr_new_chunk(struct ggml_dyn_tallocr * alloc, size_t min_size) {
    if (alloc->n_chunks >= GGML_VBUFFER_MAX_CHUNKS) {
        return -1;
    }
    struct tallocr_chunk * chunk = calloc(1, sizeof(struct tallocr_chunk));
    chunk->n_free_blocks = 1;
    chunk->free_blocks[0].offset = 0;
    // available space in a chunk is limited to max_chunk_size, but can be higher if:
    // 1. a single tensor exceeds the maximum, and cannot fit any other way
    // 2. we are running out of chunks
    // backends will either manage to allocate the larger size, or report an error.
    chunk->free_blocks[0].size = MAX(min_size, alloc->max_chunk_size);
    if (alloc->n_chunks == GGML_VBUFFER_MAX_CHUNKS - 1) {
        chunk->free_blocks[0].size = SIZE_MAX/2;
    }
    alloc->chunks[alloc->n_chunks] = chunk;
    alloc->n_chunks++;
    return alloc->n_chunks - 1;
}

#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].tensor == NULL) {
            alloc->allocated_tensors[i].tensor = tensor;
            alloc->allocated_tensors[i].addr = addr;
            return;
        }
    }
    GGML_ABORT("out of allocated_tensors");
}

static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].addr.chunk == addr.chunk && alloc->allocated_tensors[i].addr.offset == addr.offset) {
            alloc->allocated_tensors[i].tensor = NULL;
            return;
        }
    }
    GGML_ABORT("tried to free tensor %s that was not found\n", tensor->name);
}
#endif
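
// allocation strategy (summary of the function below): first search for the best-fitting
// free block among all blocks except the last one of each chunk; if none fits, fall back to
// the last block of a chunk, preferring the candidate with the best reuse_factor (least
// growth of the chunk's max_size); if that also fails, open a new chunk sized for the request.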
static struct buffer_address ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) {
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);

    int best_fit_chunk = -1;
    int best_fit_block = -1;
    size_t max_avail = 0;

    // find the best fitting free block besides the last block, within any chunk
    for (int c = 0; c < alloc->n_chunks; ++c) {
        struct tallocr_chunk * chunk = alloc->chunks[c];
        size_t best_fit_size = SIZE_MAX;
        for (int i = 0; i < chunk->n_free_blocks - 1; i++) {
            struct free_block * block = &chunk->free_blocks[i];
            max_avail = MAX(max_avail, block->size);
            if (block->size >= size && block->size <= best_fit_size) {
                best_fit_chunk = c;
                best_fit_block = i;
                best_fit_size = block->size;
            }
        }
    }

    if (best_fit_block == -1) {
        // no suitable block found, try the last block (this may grow a chunk's size)
        int64_t best_reuse = INT64_MIN;
        for (int c = 0; c < alloc->n_chunks; ++c) {
            struct tallocr_chunk * chunk = alloc->chunks[c];
            if (chunk->n_free_blocks > 0) {
                struct free_block * block = &chunk->free_blocks[chunk->n_free_blocks - 1];
                max_avail = MAX(max_avail, block->size);
                int64_t reuse_factor = chunk->max_size - block->offset - size;
                // reuse_factor < 0 : amount of extra memory that needs to be allocated
                // reuse_factor = 0 : allocated free space exactly matches tensor size
                // reuse_factor > 0 : superfluous memory that will remain unused
                bool better_reuse = best_reuse < 0 && reuse_factor > best_reuse;
                bool better_fit = reuse_factor >= 0 && reuse_factor < best_reuse;
                if (block->size >= size && (better_reuse || better_fit)) {
                    best_fit_chunk = c;
                    best_fit_block = chunk->n_free_blocks - 1;
                    best_reuse = reuse_factor;
                }
            }
        }
    }

    if (best_fit_block == -1) {
        // none of the existing chunks have enough space left
        best_fit_chunk = ggml_dyn_tallocr_new_chunk(alloc, size);
        best_fit_block = 0;
    }
    if (best_fit_chunk == -1) {
        // since the last chunk always has virtually endless memory, this should never happen
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
            __func__, size, max_avail);
        GGML_ABORT("graph allocation: failed to reserve memory");
    }

    struct tallocr_chunk * chunk = alloc->chunks[best_fit_chunk];
    struct free_block * block = &chunk->free_blocks[best_fit_block];
    struct buffer_address addr = {.chunk = best_fit_chunk, .offset = block->offset };
    block->offset += size;
    block->size -= size;
    if (block->size == 0) {
        // remove block if empty
        ggml_dyn_tallocr_remove_block(chunk, best_fit_block);
    }

    AT_PRINTF("block %d, offset %zu, chunk %d\n", best_fit_block, addr.offset, addr.chunk);

#ifdef GGML_ALLOCATOR_DEBUG
    add_allocated_tensor(alloc, addr, tensor);
    size_t cur_max = addr.offset + size;
    if (cur_max > chunk->max_size) {
        // sort allocated_tensors by chunk/offset
        for (int i = 0; i < 1024; i++) {
            for (int j = i + 1; j < 1024; j++) {
                if (ggml_buffer_address_less(alloc->allocated_tensors[j].addr, alloc->allocated_tensors[i].addr)) {
                    const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
                    struct buffer_address tmp_addr = alloc->allocated_tensors[i].addr;
                    alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
                    alloc->allocated_tensors[i].addr = alloc->allocated_tensors[j].addr;
                    alloc->allocated_tensors[j].tensor = tmp_tensor;
                    alloc->allocated_tensors[j].addr = tmp_addr;
                }
            }
        }
        GGML_LOG_DEBUG("max_size[%d] = %.2f MB: tensors: ", addr.chunk, cur_max / 1024.0 / 1024.0);
        for (int i = 0; i < 1024; i++) {
            if (alloc->allocated_tensors[i].tensor) {
                GGML_LOG_DEBUG("%s [%d: %zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
                    alloc->allocated_tensors[i].addr.chunk,
                    alloc->allocated_tensors[i].addr.offset,
                    alloc->allocated_tensors[i].addr.offset + ggml_nbytes(alloc->allocated_tensors[i].tensor),
                    ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
            }
        }
        GGML_LOG_DEBUG("\n");
    }
#endif

    chunk->max_size = MAX(chunk->max_size, addr.offset + size);

    return addr;

    GGML_UNUSED(tensor);
}

// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, size_t size, const struct ggml_tensor * tensor) {
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: freeing %s at {chunk=%d, offset=%zu} (%zu bytes) - n_free_blocks = %d\n",
        __func__, tensor->name, addr.chunk, addr.offset, size, alloc->chunks[addr.chunk]->n_free_blocks);

#ifdef GGML_ALLOCATOR_DEBUG
    remove_allocated_tensor(alloc, addr, tensor);
#endif

    struct tallocr_chunk * chunk = alloc->chunks[addr.chunk];

    // see if we can merge with an existing block
    for (int i = 0; i < chunk->n_free_blocks; i++) {
        struct free_block * block = &chunk->free_blocks[i];
        // check if ptr is at the end of the block
        if (block->offset + block->size == addr.offset) {
            block->size += size;
            // check if we can merge with the next block
            if (i < chunk->n_free_blocks - 1) {
                struct free_block * next = &chunk->free_blocks[i+1];
                if (block->offset + block->size == next->offset) {
                    block->size += next->size;
                    ggml_dyn_tallocr_remove_block(chunk, i+1);
                }
            }
            return;
        }
        // check if ptr is at the beginning of the block
        if (addr.offset + size == block->offset) {
            block->offset = addr.offset;
            block->size += size;
            // check if we can merge with the previous block
            if (i > 0) {
                struct free_block * prev = &chunk->free_blocks[i-1];
                if (prev->offset + prev->size == block->offset) {
                    prev->size += block->size;
                    ggml_dyn_tallocr_remove_block(chunk, i);
                }
            }
            return;
        }
    }
    // otherwise, add a new block
    ggml_dyn_tallocr_insert_block(chunk, addr.offset, size);

    GGML_UNUSED(tensor);
}
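
// merging example (illustrative): with free blocks [0,128) and [256,320), freeing the range
// [128,256) first extends the block at offset 0 to [0,256), and the adjacency check then
// merges it with the following block into a single free block [0,320).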

static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; i++) {
        free(alloc->chunks[i]);
        alloc->chunks[i] = NULL;
    }
    alloc->n_chunks = 0;

#ifdef GGML_ALLOCATOR_DEBUG
    for (int i = 0; i < 1024; i++) {
        alloc->allocated_tensors[i].tensor = NULL;
    }
#endif
}

static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment, size_t max_buffer_size) {
    struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr));

    *alloc = (struct ggml_dyn_tallocr) {
        /*.alignment      = */ alignment,
        /*.max_chunk_size = */ MIN(max_buffer_size, SIZE_MAX/2), // clamp to avoid overflows
        /*.chunks         = */ {NULL},
        /*.n_chunks       = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {{0}},
#endif
    };

    ggml_dyn_tallocr_reset(alloc);

    return alloc;
}

static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < alloc->n_chunks; ++i) {
        free(alloc->chunks[i]);
    }
    free(alloc);
}

static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc, int chunk) {
    return chunk < alloc->n_chunks ? alloc->chunks[chunk]->max_size : 0;
}

// virtual buffer with contiguous memory range, split into multiple backend buffers (chunks)
struct vbuffer {
    ggml_backend_buffer_t chunks[GGML_VBUFFER_MAX_CHUNKS];
};

static void ggml_vbuffer_free(struct vbuffer * buf) {
    if (buf == NULL) {
        return;
    }
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; ++i) {
        ggml_backend_buffer_free(buf->chunks[i]);
    }
    free(buf);
}

static size_t ggml_vbuffer_chunk_size(struct vbuffer * buf, int chunk) {
    return buf->chunks[chunk] ? ggml_backend_buffer_get_size(buf->chunks[chunk]) : 0;
}

static size_t ggml_vbuffer_size(struct vbuffer * buf) {
    size_t size = 0;
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        size += ggml_backend_buffer_get_size(buf->chunks[i]);
    }
    return size;
}

static struct vbuffer * ggml_vbuffer_alloc(ggml_backend_buffer_type_t buft, const struct ggml_dyn_tallocr * talloc, enum ggml_backend_buffer_usage usage) {
    struct vbuffer * buf = (struct vbuffer *)calloc(1, sizeof(struct vbuffer));
    if (buf == NULL) {
        return NULL;
    }

    for (int n = 0; n < talloc->n_chunks; n++) {
        size_t chunk_size = talloc->chunks[n]->max_size;
        buf->chunks[n] = ggml_backend_buft_alloc_buffer(buft, chunk_size);
        if (buf->chunks[n] == NULL) {
            ggml_vbuffer_free(buf);
            return NULL;
        }
        ggml_backend_buffer_set_usage(buf->chunks[n], usage);
    }
    return buf;
}

static void ggml_vbuffer_tensor_alloc(struct vbuffer * buf, struct ggml_tensor * tensor, struct buffer_address buf_addr) {
    void * base = ggml_backend_buffer_get_base(buf->chunks[buf_addr.chunk]);
    void * addr = (char *)base + buf_addr.offset;
    ggml_backend_tensor_alloc(buf->chunks[buf_addr.chunk], tensor, addr);
}

static void ggml_vbuffer_reset(struct vbuffer * buf) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        ggml_backend_buffer_reset(buf->chunks[i]);
    }
}

/////////////////////////////////////

// graph allocator

struct hash_node {
    int n_children;
    int n_views;
    int buffer_id;
    struct buffer_address addr;
    bool allocated;
};

struct tensor_alloc {
    int buffer_id;
    struct buffer_address addr;
    size_t size_max; // 0 = pre-allocated, unused, or view
};

struct leaf_alloc {
    struct tensor_alloc leaf;
};

struct node_alloc {
    struct tensor_alloc dst;
    struct tensor_alloc src[GGML_MAX_SRC];
};

struct ggml_gallocr {
    ggml_backend_buffer_type_t * bufts; // [n_buffers]
    struct vbuffer ** buffers; // [n_buffers]
    struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
    int n_buffers;

    struct ggml_hash_set hash_set;
    struct hash_node * hash_values; // [hash_set.size]

    struct node_alloc * node_allocs; // [n_nodes]
    int n_nodes;

    struct leaf_alloc * leaf_allocs; // [n_leafs]
    int n_leafs;
};
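
// note: the graph allocator works in two phases. ggml_gallocr_reserve_n() runs the graph
// through the dynamic allocators above to measure the required buffer sizes and records a
// tensor_alloc (buffer id + chunk/offset) for every node and leaf, then (re)allocates the
// backend buffers. ggml_gallocr_alloc_graph() later applies those recorded assignments to
// the tensors of a compatible graph without re-planning.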

ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) {
    ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr));
    GGML_ASSERT(galloc != NULL);

    galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t));
    GGML_ASSERT(galloc->bufts != NULL);

    galloc->buffers = calloc(n_bufs, sizeof(struct vbuffer *));
    GGML_ASSERT(galloc->buffers != NULL);

    galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *));
    GGML_ASSERT(galloc->buf_tallocs != NULL);

    for (int i = 0; i < n_bufs; i++) {
        galloc->bufts[i] = bufts[i];
        galloc->buffers[i] = NULL;

        // check if the same buffer type is used multiple times and reuse the same allocator
        for (int j = 0; j < i; j++) {
            if (bufts[i] == bufts[j]) {
                galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
                break;
            }
        }

        if (galloc->buf_tallocs[i] == NULL) {
            size_t alignment = ggml_backend_buft_get_alignment(bufts[i]);
            size_t max_size = ggml_backend_buft_get_max_size(bufts[i]);
            galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment, max_size);
        }
    }
    galloc->n_buffers = n_bufs;

    return galloc;
}

ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) {
    return ggml_gallocr_new_n(&buft, 1);
}

void ggml_gallocr_free(ggml_gallocr_t galloc) {
    if (galloc == NULL) {
        return;
    }

    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buffers[j] == galloc->buffers[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_vbuffer_free(galloc->buffers[i]);
            }
        }
        if (galloc->buf_tallocs != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
            }
        }
    }

    ggml_hash_set_free(&galloc->hash_set);
    free(galloc->hash_values);
    free(galloc->bufts);
    free(galloc->buffers);
    free(galloc->buf_tallocs);
    free(galloc->node_allocs);
    free(galloc->leaf_allocs);
    free(galloc);
}

typedef struct ggml_gallocr * ggml_gallocr_t;

static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    size_t i = ggml_hash_find_or_insert(&galloc->hash_set, t);
    return &galloc->hash_values[i];
}

static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return ggml_gallocr_hash_get(galloc, t)->allocated;
}

static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated;
}

// free the extra space at the end if the new tensor is smaller
static void ggml_gallocr_free_extra_space(ggml_gallocr_t galloc, struct ggml_tensor * node, struct ggml_tensor * parent) {
    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
    struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);

    size_t parent_size = ggml_backend_buft_get_alloc_size(galloc->bufts[p_hn->buffer_id], parent);
    size_t node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);

    GGML_ASSERT(parent_size >= node_size);

    if (parent_size > node_size) {
        struct ggml_dyn_tallocr * p_alloc = galloc->buf_tallocs[p_hn->buffer_id];
        struct buffer_address p_addr = p_hn->addr;
        p_addr.offset += node_size;
        size_t extra_size = parent_size - node_size;
        AT_PRINTF("freeing extra %zu bytes from parent %s for %s\n", extra_size, parent->name, node->name);
        ggml_dyn_tallocr_free_tensor(p_alloc, p_addr, extra_size, parent);
    }
}

static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0);
    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);

    if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) {
        hn->allocated = true;
        assert(hn->addr.offset == 0);

        // try to reuse a parent's buffer (inplace)
        if (ggml_op_can_inplace(node->op)) {
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                struct ggml_tensor * parent = node->src[i];
                if (parent == NULL) {
                    continue;
                }

                // if the node's data is external, then we cannot re-use it
                if (!ggml_gallocr_is_own(galloc, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
                    continue;
                }

                // outputs cannot be reused
                if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) {
                    AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
                    continue;
                }

                if (!ggml_are_same_layout(node, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
                    continue;
                }

                struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
                if (p_hn->n_children == 1 && p_hn->n_views == 0) {
                    if (ggml_is_view(parent)) {
                        struct ggml_tensor * view_src = parent->view_src;
                        struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                        if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
                            AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
                            assert(view_src_hn->addr.chunk == p_hn->addr.chunk && view_src_hn->addr.offset == p_hn->addr.offset);
                            hn->buffer_id = p_hn->buffer_id;
                            hn->addr = p_hn->addr;
                            p_hn->allocated = false; // avoid freeing the parent
                            view_src_hn->allocated = false;
                            ggml_gallocr_free_extra_space(galloc, node, view_src);
                            return;
                        }
                    } else {
                        AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                        hn->buffer_id = p_hn->buffer_id;
                        hn->addr = p_hn->addr;
                        p_hn->allocated = false; // avoid freeing the parent
                        ggml_gallocr_free_extra_space(galloc, node, parent);
                        return;
                    }
                }
            }
        }

        // allocate tensor from the buffer
        struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
        ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
        size_t size = ggml_backend_buft_get_alloc_size(buft, node);
        hn->buffer_id = buffer_id;
        hn->addr = ggml_dyn_tallocr_alloc(alloc, size, node);
    }
}

static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
    // graph outputs are never freed
    if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
        AT_PRINTF("not freeing output %s\n", node->name);
        return;
    }

    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
    int buffer_id = hn->buffer_id;
    struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
    ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
    size_t size = ggml_backend_buft_get_alloc_size(buft, node);
    ggml_dyn_tallocr_free_tensor(alloc, hn->addr, size, node);
    hn->allocated = false;
}

static int get_node_buffer_id(const int * node_buffer_ids, int i) {
    return node_buffer_ids ? node_buffer_ids[i] : 0;
}
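
// note: the planning pass below is a reference-counting sweep over the graph in execution
// order. it first counts how many consumers (children) and views each tensor has, then for
// each node allocates its sources and destination, and finally decrements the counters of
// the sources; once a tensor has no remaining children or views it is returned to the
// dynamic allocator so that later nodes can reuse its memory.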
static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    // clear hash tables
    ggml_hash_set_reset(&galloc->hash_set);
    memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);

    // allocate leafs
    // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
    }

    // count number of children and views
    // allocate other graph inputs and leafs first to avoid overwriting them
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];

        // TODO: better way to add external dependencies
        // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
        // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
        // itself is never used and should not be considered a dependency
        if (ggml_is_view(node) && node->op != GGML_OP_NONE) {
            struct ggml_tensor * view_src = node->view_src;
            ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
        }

        if (node->flags & GGML_TENSOR_FLAG_INPUT) {
            ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }

            ggml_gallocr_hash_get(galloc, src)->n_children += 1;

            // allocate explicit inputs
            if (src->flags & GGML_TENSOR_FLAG_INPUT) {
                ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
            }
        }
    }

    // allocate tensors
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int buffer_id = get_node_buffer_id(node_buffer_ids, i);

        // allocate parents (only leafs need to be allocated at this point)
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            ggml_gallocr_allocate_node(galloc, parent, buffer_id);
        }

        // allocate node
        ggml_gallocr_allocate_node(galloc, node, buffer_id);

        AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name);
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            AT_PRINTF("%s", parent->name);
            if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
                AT_PRINTF(", ");
            }
        }
        AT_PRINTF("\n");

        // update parents
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
            p_hn->n_children -= 1;

            AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
                parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);

            if (p_hn->n_children == 0 && p_hn->n_views == 0) {
                if (ggml_is_view(parent)) {
                    struct ggml_tensor * view_src = parent->view_src;
                    struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                    view_src_hn->n_views -= 1;
                    AT_PRINTF("view_src %s: %d children, %d views\n",
                        view_src->name, view_src_hn->n_children, view_src_hn->n_views);
                    if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
                        ggml_gallocr_free_node(galloc, view_src);
                    }
                }
                else if (p_hn->allocated) {
                    ggml_gallocr_free_node(galloc, parent);
                }
            }
            AT_PRINTF("\n");
        }
    }
}

bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    size_t min_hash_size = graph->n_nodes + graph->n_leafs;
    // add 25% margin to avoid hash collisions
    min_hash_size += min_hash_size / 4;

    // initialize hash table
    if (galloc->hash_set.size < min_hash_size) {
        ggml_hash_set_free(&galloc->hash_set);
        galloc->hash_set = ggml_hash_set_new(min_hash_size);
        GGML_ASSERT(galloc->hash_set.keys != NULL);

        free(galloc->hash_values);
        galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
        GGML_ASSERT(galloc->hash_values != NULL);
    }

    // reset allocators
    for (int i = 0; i < galloc->n_buffers; i++) {
        ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
    }

    // allocate in hash table
    ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);

    // set the node_allocs from the hash table
    if (galloc->n_nodes < graph->n_nodes) {
        free(galloc->node_allocs);
        galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
        GGML_ASSERT(galloc->node_allocs != NULL);
    }
    galloc->n_nodes = graph->n_nodes;
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        if (node->view_src || node->data) {
            node_alloc->dst.buffer_id = -1;
            node_alloc->dst.addr = GGML_BUFFER_ADDRESS_INVALID;
            node_alloc->dst.size_max = 0;
        } else {
            struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
            node_alloc->dst.buffer_id = hn->buffer_id;
            node_alloc->dst.addr = hn->addr;
            node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (!src || src->view_src || src->data) {
                node_alloc->src[j].buffer_id = -1;
                node_alloc->src[j].addr = GGML_BUFFER_ADDRESS_INVALID;
                node_alloc->src[j].size_max = 0;
            } else {
                struct hash_node * hn = ggml_gallocr_hash_get(galloc, src);
                node_alloc->src[j].buffer_id = hn->buffer_id;
                node_alloc->src[j].addr = hn->addr;
                node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
            }
        }
    }
    if (galloc->n_leafs < graph->n_leafs) {
        free(galloc->leaf_allocs);
        galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
        GGML_ASSERT(galloc->leaf_allocs != NULL);
    }
    galloc->n_leafs = graph->n_leafs;
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
        if (leaf->view_src || leaf->data) {
            galloc->leaf_allocs[i].leaf.buffer_id = -1;
            galloc->leaf_allocs[i].leaf.addr = GGML_BUFFER_ADDRESS_INVALID;
            galloc->leaf_allocs[i].leaf.size_max = 0;
        } else {
            galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
            galloc->leaf_allocs[i].leaf.addr = hn->addr;
            galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
        }
    }

    // reallocate buffers if needed
    for (int i = 0; i < galloc->n_buffers; i++) {
        // if the buffer type is used multiple times, we reuse the same buffer
        for (int j = 0; j < i; j++) {
            if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                galloc->buffers[i] = galloc->buffers[j];
                break;
            }
        }

        // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
        bool realloc = galloc->buffers[i] == NULL;
        size_t new_size = 0;
        for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) {
            size_t cur_chunk_size = galloc->buffers[i] ? ggml_vbuffer_chunk_size(galloc->buffers[i], c) : 0;
            size_t new_chunk_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i], c);
            new_size += new_chunk_size;
            if (new_chunk_size > cur_chunk_size) {
                realloc = true;
            }
        }
        if (realloc) {
#ifndef NDEBUG
            {
                size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0;
                if (cur_size > 0) {
                    GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n",
                        __func__, ggml_backend_buft_name(galloc->bufts[i]),
                        cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
                }
            }
#endif

            ggml_vbuffer_free(galloc->buffers[i]);
            galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
            if (galloc->buffers[i] == NULL) {
                GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
                return false;
            }
        }
    }

    return true;
}

bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
}

static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
    int buffer_id = tensor_alloc->buffer_id;
    assert(tensor->data || tensor->view_src || ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);

    if (tensor->view_src != NULL) {
        if (tensor->buffer == NULL) {
            assert(tensor_alloc->addr.offset == SIZE_MAX);
            if (tensor->view_src->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
            ggml_backend_view_init(tensor);
        }
    } else {
        if (tensor->data == NULL) {
            assert(tensor_alloc->addr.offset != SIZE_MAX);
            assert(ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);
            ggml_vbuffer_tensor_alloc(galloc->buffers[buffer_id], tensor, tensor_alloc->addr);
        } else {
            if (tensor->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
        }
    }
}

static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) {
    size_t node_size = 0;
    if (!node->data && !node->view_src) {
        // If we previously had data but don't now then reallocate
        if (talloc->buffer_id < 0) {
            return false;
        }
        node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
    }
    return talloc->size_max >= node_size;
}

static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (galloc->n_nodes != graph->n_nodes) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
#endif
        return true;
    }

    if (galloc->n_leafs != graph->n_leafs) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
#endif
        return true;
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];

        if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
#endif
            return true;
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
#ifndef NDEBUG
                GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
#endif
                return true;
            }
        }
    }

    return false;
}

bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (ggml_gallocr_needs_realloc(galloc, graph)) {
        if (galloc->n_buffers == 1) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
#endif
            if (!ggml_gallocr_reserve(galloc, graph)) {
                return false;
            }
        } else {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
#endif
            return false;
        }
    }

    // reset buffers
    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers[i] != NULL) {
            ggml_vbuffer_reset(galloc->buffers[i]);
        }
    }

    // allocate the graph tensors from the previous assignments
    // leafs
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
        ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
    }
    // nodes
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
        }
        ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
    }

    return true;
}
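
// usage sketch (illustrative; `buft` is a backend buffer type and `graph` a ggml_cgraph
// built elsewhere, e.g. with worst-case shapes):
//
//     ggml_gallocr_t galloc = ggml_gallocr_new(buft);
//     ggml_gallocr_reserve(galloc, graph);     // plan and allocate the backend buffers
//     ggml_gallocr_alloc_graph(galloc, graph); // bind tensors to the planned addresses
//     ...
//     ggml_gallocr_free(galloc);
//
// with a single buffer type, ggml_gallocr_alloc_graph() can also re-reserve automatically
// when the graph changes, as implemented above.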

size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);

    if (galloc->buffers[buffer_id] == NULL) {
        return 0;
    }

    for (int i = 0; i < buffer_id; i++) {
        if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
            // this buffer is the same as a previous one due to the same buffer type being used multiple times
            // only return the buffer size the first time it appears to avoid double counting
            return 0;
        }
    }

    return ggml_vbuffer_size(galloc->buffers[buffer_id]);
}

// utils

static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) {
    for (size_t i = 0; i < *n_buffers; i++) {
        ggml_backend_buffer_free((*buffers)[i]);
    }
    free(*buffers);
}

static bool alloc_tensor_range(struct ggml_context * ctx,
        struct ggml_tensor * first, struct ggml_tensor * last,
        ggml_backend_buffer_type_t buft, size_t size,
        ggml_backend_buffer_t ** buffers, size_t * n_buffers) {

    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
    if (buffer == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size);
        free_buffers(buffers, n_buffers);
        return false;
    }

    *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1));
    (*buffers)[(*n_buffers)++] = buffer;

    struct ggml_tallocr tallocr = ggml_tallocr_new(buffer);

    for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) {
        enum ggml_status status = GGML_STATUS_SUCCESS;
        if (t->data == NULL) {
            if (t->view_src == NULL) {
                status = ggml_tallocr_alloc(&tallocr, t);
            } else if (t->buffer == NULL) {
                status = ggml_backend_view_init(t);
            }
        } else {
            if (t->view_src != NULL && t->buffer == NULL) {
                // view of a pre-allocated tensor
                status = ggml_backend_view_init(t);
            }
        }
        if (status != GGML_STATUS_SUCCESS) {
            GGML_LOG_ERROR("%s: failed to initialize tensor %s\n", __func__, t->name);
            free_buffers(buffers, n_buffers);
            return false;
        }
    }

    return true;
}

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(ggml_get_no_alloc(ctx) == true);

    size_t alignment = ggml_backend_buft_get_alignment(buft);
    size_t max_size = ggml_backend_buft_get_max_size(buft);

    ggml_backend_buffer_t * buffers = NULL;
    size_t n_buffers = 0;

    size_t cur_buf_size = 0;
    struct ggml_tensor * first = ggml_get_first_tensor(ctx);
    for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        size_t this_size = 0;
        if (t->data == NULL && t->view_src == NULL) {
            this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
        }

        if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) {
            // allocate tensors in the current buffer
            if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
                return NULL;
            }
            first = t;
            cur_buf_size = this_size;
        } else {
            cur_buf_size += this_size;
        }
    }

    // allocate remaining tensors
    if (cur_buf_size > 0) {
        if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
            return NULL;
        }
    }

    if (n_buffers == 0) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
#endif
        return NULL;
    }

    ggml_backend_buffer_t buffer;
    if (n_buffers == 1) {
        buffer = buffers[0];
    } else {
        buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
    }
    free(buffers);
    return buffer;
}

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) {
    return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend));
}