ggml-alloc.c
#include "ggml-alloc.h"
#include "ggml-backend-impl.h"
#include "ggml.h"
#include "ggml-impl.h"
#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MAX_FREE_BLOCKS 256

//#define GGML_ALLOCATOR_DEBUG

//#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__)
#define AT_PRINTF(...)

// TODO: GGML_PAD ?
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
    assert(alignment && !(alignment & (alignment - 1))); // power of 2
    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
    return offset + align;
}
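// For example (illustrative values, not from the code above): with buffer == NULL,
// offset == 100 and alignment == 32, aligned_offset returns 128, the next multiple
// of 32 at or above 100. Callers also use it with offset == 0 to align the base
// pointer of a freshly mapped buffer.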
struct free_block {
    void * addr;
    size_t size;
};

struct ggml_tallocr {
    struct ggml_backend_buffer * buffer;
    bool buffer_owned;
    void * base;
    size_t alignment;

    int n_free_blocks;
    struct free_block free_blocks[MAX_FREE_BLOCKS];

    size_t max_size;

    bool measure;

#ifdef GGML_ALLOCATOR_DEBUG
    struct ggml_tensor * allocated_tensors[1024];
#endif
};
#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i] == NULL) {
            alloc->allocated_tensors[i] = tensor;
            return;
        }
    }
    GGML_ASSERT(!"out of allocated_tensors");
}

static void remove_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i] == tensor ||
            (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
            alloc->allocated_tensors[i] = NULL;
            return;
        }
    }
    printf("tried to free tensor %s, but it was not found\n", tensor->name);
    GGML_ASSERT(!"tensor not found");
}
#endif
// check if a tensor is allocated by this buffer
static bool ggml_tallocr_is_own(ggml_tallocr_t alloc, const struct ggml_tensor * tensor) {
    return tensor->buffer == alloc->buffer && (!tensor->view_src || tensor->view_src->buffer == alloc->buffer);
}

static bool ggml_is_view(struct ggml_tensor * t) {
    return t->view_src != NULL;
}
void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
    GGML_ASSERT(!ggml_is_view(tensor)); // views generally get data pointer from one of their sources
    GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated

    size_t size = ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);

    size_t max_avail = 0;

    // find the best fitting free block besides the last block
    int best_fit_block = -1;
    size_t best_fit_size = SIZE_MAX;
    for (int i = 0; i < alloc->n_free_blocks - 1; i++) {
        struct free_block * block = &alloc->free_blocks[i];
        max_avail = MAX(max_avail, block->size);
        if (block->size >= size && block->size <= best_fit_size) {
            best_fit_block = i;
            best_fit_size = block->size;
        }
    }

    if (best_fit_block == -1) {
        // the last block is our last resort
        struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
        max_avail = MAX(max_avail, block->size);
        if (block->size >= size) {
            best_fit_block = alloc->n_free_blocks - 1;
        } else {
            fprintf(stderr, "%s: not enough space in the buffer to allocate %s (needed %zu, largest block available %zu)\n",
                    __func__, tensor->name, size, max_avail);
            GGML_ASSERT(!"not enough space in the buffer");
            return;
        }
    }

    struct free_block * block = &alloc->free_blocks[best_fit_block];
    void * addr = block->addr;
    block->addr = (char*)block->addr + size;
    block->size -= size;
    if (block->size == 0) {
        // remove block if empty
        alloc->n_free_blocks--;
        for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
            alloc->free_blocks[j] = alloc->free_blocks[j+1];
        }
    }

    AT_PRINTF("block %d, addr %p\n", best_fit_block, addr);

    tensor->data = addr;
    tensor->buffer = alloc->buffer;
    if (!alloc->measure) {
        ggml_backend_buffer_init_tensor(alloc->buffer, tensor);
    }

#ifdef GGML_ALLOCATOR_DEBUG
    add_allocated_tensor(alloc, tensor);
    size_t cur_max = (char*)addr - (char*)alloc->base + size;
    if (cur_max > alloc->max_size) {
        printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
        for (int i = 0; i < 1024; i++) {
            if (alloc->allocated_tensors[i]) {
                printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
            }
        }
        printf("\n");
    }
#endif

    alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->base + size);
}
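// Minimal usage sketch (an assumption for illustration: 'ctx' is a ggml context
// created with no_alloc = true, so ggml_new_tensor_* does not allocate the
// tensor data itself, and 'pool'/'pool_size' are a caller-provided buffer):
//
//     ggml_tallocr_t ta = ggml_tallocr_new(pool, pool_size, /*alignment =*/ 32);
//     struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
//     ggml_tallocr_alloc(ta, t); // t->data now points into 'pool'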
// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_tallocr_free_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
    if (ggml_tallocr_is_own(alloc, tensor) == false) {
        // the tensor was not allocated in this buffer
        // this can happen because the graph allocator will try to free weights and other tensors from different buffers
        // the easiest way to deal with this is just to ignore it
        // AT_PRINTF("ignoring %s (their buffer: %p, our buffer: %p)\n", tensor->name, (void *)tensor->buffer, (void *)alloc->buffer);
        return;
    }

    void * ptr = tensor->data;

    size_t size = ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks);

#ifdef GGML_ALLOCATOR_DEBUG
    remove_allocated_tensor(alloc, tensor);
#endif

    // see if we can merge with an existing block
    for (int i = 0; i < alloc->n_free_blocks; i++) {
        struct free_block * block = &alloc->free_blocks[i];
        // check if ptr is at the end of the block
        if ((char*)block->addr + block->size == ptr) {
            block->size += size;
            // check if we can merge with the next block
            if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
                block->size += alloc->free_blocks[i+1].size;
                alloc->n_free_blocks--;
                for (int j = i+1; j < alloc->n_free_blocks; j++) {
                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
                }
            }
            return;
        }
        // check if ptr is at the beginning of the block
        if ((char*)ptr + size == block->addr) {
            block->addr = ptr;
            block->size += size;
            // check if we can merge with the previous block
            if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
                alloc->free_blocks[i-1].size += block->size;
                alloc->n_free_blocks--;
                for (int j = i; j < alloc->n_free_blocks; j++) {
                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
                }
            }
            return;
        }
    }
    // otherwise, add a new block
    GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
    int insert_pos = 0;
    while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
        insert_pos++;
    }
    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
        alloc->free_blocks[i] = alloc->free_blocks[i-1];
    }
    // insert the new block
    alloc->free_blocks[insert_pos].addr = ptr;
    alloc->free_blocks[insert_pos].size = size;
    alloc->n_free_blocks++;
}
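// Worked example of the coalescing above (illustrative offsets): with free
// blocks [0,64) and [128,256), freeing the range [64,128) first extends
// [0,64) to [0,128) (ptr is at the end of that block), which then merges with
// [128,256) into a single free block [0,256).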
void ggml_tallocr_reset(ggml_tallocr_t alloc) {
    alloc->n_free_blocks = 1;
    size_t align_offset = aligned_offset(alloc->base, 0, alloc->alignment);
    alloc->free_blocks[0].addr = (char *)alloc->base + align_offset;

    if (alloc->measure) {
        alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
    } else {
        alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset;
        ggml_backend_buffer_reset(alloc->buffer);
    }
}
ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment) {
    struct ggml_backend_buffer * buffer = ggml_backend_cpu_buffer_from_ptr(data, size);

    ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr));

    *alloc = (struct ggml_tallocr) {
        /*.buffer        = */ buffer,
        /*.buffer_owned  = */ true,
        /*.base          = */ ggml_backend_buffer_get_base(buffer),
        /*.alignment     = */ alignment,
        /*.n_free_blocks = */ 0,
        /*.free_blocks   = */ {{0}},
        /*.max_size      = */ 0,
        /*.measure       = */ false,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {0},
#endif
    };

    ggml_tallocr_reset(alloc);

    return alloc;
}
ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment) {
    ggml_tallocr_t alloc = ggml_tallocr_new((void *)0x1000, SIZE_MAX/2, alignment);
    alloc->measure = true;

    return alloc;
}
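// Typical two-pass sketch: measure first, then allocate for real. Note that the
// measure pass hands out fake data pointers (based at 0x1000), and
// ggml_tallocr_alloc asserts data == NULL, so the tensors must be recreated
// before the real pass. Illustrative only:
//
//     ggml_tallocr_t measure = ggml_tallocr_new_measure(32);
//     ggml_tallocr_alloc(measure, t);                  // records the peak size only
//     size_t needed = ggml_tallocr_max_size(measure);  // includes a 10% margin
//     ggml_tallocr_free(measure);
//
//     void * pool = malloc(needed);
//     ggml_tallocr_t ta = ggml_tallocr_new(pool, needed, 32);
//     // ... recreate the tensors, then allocate them with ggml_tallocr_alloc(ta, ...)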
ggml_tallocr_t ggml_tallocr_new_measure_from_buft(struct ggml_backend_buffer_type * buft) {
    // create a backend buffer to get the correct tensor allocation sizes
    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, 1);

    // TODO: move alloc initialization to a common ggml_tallocr_new_impl function
    ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer);
    alloc->buffer_owned = true;
    alloc->measure = true;
    ggml_tallocr_reset(alloc);
    return alloc;
}

ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend) {
    return ggml_tallocr_new_measure_from_buft(ggml_backend_get_default_buffer_type(backend));
}
ggml_tallocr_t ggml_tallocr_new_from_buft(struct ggml_backend_buffer_type * buft, size_t size) {
    // create a backend buffer to get the correct tensor allocation sizes
    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
    ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer);
    alloc->buffer_owned = true;
    return alloc;
}

ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size) {
    return ggml_tallocr_new_from_buft(ggml_backend_get_default_buffer_type(backend), size);
}
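// Sketch: allocating directly from a backend's default buffer type, e.g. the
// CPU backend (assumes ggml_backend_cpu_init() from ggml-backend.h; the size
// is illustrative). Since buffer_owned is set, ggml_tallocr_free also frees
// the backend buffer:
//
//     ggml_backend_t backend = ggml_backend_cpu_init();
//     ggml_tallocr_t ta = ggml_tallocr_new_from_backend(backend, 16*1024*1024);
//     // ... ggml_tallocr_alloc(ta, ...) ...
//     ggml_tallocr_free(ta);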
ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer) {
    ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr));

    *alloc = (struct ggml_tallocr) {
        /*.buffer        = */ buffer,
        /*.buffer_owned  = */ false,
        /*.base          = */ ggml_backend_buffer_get_base(buffer),
        /*.alignment     = */ ggml_backend_buffer_get_alignment(buffer),
        /*.n_free_blocks = */ 0,
        /*.free_blocks   = */ {{0}},
        /*.max_size      = */ 0,
        /*.measure       = */ false,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {0},
#endif
    };

    ggml_tallocr_reset(alloc);

    return alloc;
}

struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t alloc) {
    return alloc->buffer;
}

void ggml_tallocr_free(ggml_tallocr_t alloc) {
    if (alloc == NULL) {
        return;
    }

    if (alloc->buffer_owned) {
        ggml_backend_buffer_free(alloc->buffer);
    }
    free(alloc);
}

bool ggml_tallocr_is_measure(ggml_tallocr_t alloc) {
    return alloc->measure;
}

size_t ggml_tallocr_max_size(ggml_tallocr_t alloc) {
    // FIXME: changes in the tensor sizes compared to the measure graph may cause allocations to fail
    // to avoid this, we add a 10% margin to the buffer size
    return alloc->max_size + alloc->max_size/10;
}
// graph allocator

struct hash_node {
    int n_children;
    int n_views;
};

struct ggml_gallocr {
    ggml_tallocr_t talloc;
    struct ggml_hash_set hash_set;
    struct hash_node * hash_values;
    size_t hash_values_size;
    ggml_tallocr_t * hash_allocs;
    int * parse_seq;
    int parse_seq_len;
};

ggml_gallocr_t ggml_gallocr_new(void) {
    ggml_gallocr_t galloc = (ggml_gallocr_t)malloc(sizeof(struct ggml_gallocr));

    *galloc = (struct ggml_gallocr) {
        /*.talloc           = */ NULL,
        /*.hash_set         = */ {0},
        /*.hash_values      = */ NULL,
        /*.hash_values_size = */ 0,
        /*.hash_allocs      = */ NULL,
        /*.parse_seq        = */ NULL,
        /*.parse_seq_len    = */ 0,
    };

    return galloc;
}
void ggml_gallocr_free(ggml_gallocr_t galloc) {
    if (galloc == NULL) {
        return;
    }

    if (galloc->hash_set.keys != NULL) {
        free(galloc->hash_set.keys);
    }
    if (galloc->hash_values != NULL) {
        free(galloc->hash_values);
    }
    if (galloc->hash_allocs != NULL) {
        free(galloc->hash_allocs);
    }
    if (galloc->parse_seq != NULL) {
        free(galloc->parse_seq);
    }
    free(galloc);
}

void ggml_gallocr_set_parse_seq(ggml_gallocr_t galloc, const int * list, int n) {
    free(galloc->parse_seq);
    galloc->parse_seq = malloc(sizeof(int) * n);

    for (int i = 0; i < n; i++) {
        galloc->parse_seq[i] = list[i];
    }
    galloc->parse_seq_len = n;
}
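// Format of a parse_seq, as consumed by ggml_tallocr_alloc_graph_impl below:
// graph node indices interleaved with -1 barriers. An illustrative list
// { 0, 1, -1, 2, 3, -1 } allocates nodes 0 and 1, frees their dead parents
// only when the first barrier is reached, then does the same for nodes 2 and 3.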
static struct hash_node * hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    size_t i = ggml_hash_find_or_insert(galloc->hash_set, t);
    return &galloc->hash_values[i];
}

static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    if (a->type != b->type) {
        return false;
    }
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        if (a->ne[i] != b->ne[i]) {
            return false;
        }
        if (a->nb[i] != b->nb[i]) {
            return false;
        }
    }
    return true;
}
static bool ggml_op_can_inplace(enum ggml_op op) {
    switch (op) {
        case GGML_OP_SCALE:
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ADD:
        case GGML_OP_ADD1:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_UNARY:
        case GGML_OP_ROPE:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SOFT_MAX:
            return true;

        default:
            return false;
    }
}
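// Example of the effect: for c = ggml_add(ctx, a, b), if a was allocated by
// this allocator, has no remaining children or views, and has the same layout
// as c, then allocate_node below turns c into a view of a, so the add runs in
// place and no new memory is reserved for c.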
static ggml_tallocr_t node_tallocr(ggml_gallocr_t galloc, struct ggml_tensor * node) {
    if (galloc->talloc != NULL) {
        return galloc->talloc;
    }

    return galloc->hash_allocs[ggml_hash_find_or_insert(galloc->hash_set, node)];
}

static void init_view(ggml_gallocr_t galloc, struct ggml_tensor * view, bool update_backend) {
    ggml_tallocr_t alloc = node_tallocr(galloc, view);

    GGML_ASSERT(view->view_src != NULL && view->view_src->data != NULL);
    if (update_backend) {
        view->backend = view->view_src->backend;
    }
    // views are initialized in the alloc buffer rather than the view_src buffer
    view->buffer = alloc->buffer;
    view->data   = (char *)view->view_src->data + view->view_offs;

    assert(ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->buft == alloc->buffer->buft);

    if (!alloc->measure) {
        ggml_backend_buffer_init_tensor(alloc->buffer, view);
    }
}
static void allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
    ggml_tallocr_t alloc = node_tallocr(galloc, node);

    if (node->data == NULL) {
        if (ggml_is_view(node)) {
            init_view(galloc, node, true);
        } else {
            // see if we can reuse a parent's buffer (inplace)
            if (ggml_op_can_inplace(node->op)) {
                for (int i = 0; i < GGML_MAX_SRC; i++) {
                    struct ggml_tensor * parent = node->src[i];
                    if (parent == NULL) {
                        break;
                    }

                    // if the node's data is external, then we cannot re-use it
                    if (ggml_tallocr_is_own(alloc, parent) == false) {
                        AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
                        continue;
                    }

                    struct hash_node * p_hn = hash_get(galloc, parent);
                    if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) {
                        if (ggml_is_view(parent)) {
                            struct ggml_tensor * view_src = parent->view_src;
                            struct hash_node * view_src_hn = hash_get(galloc, view_src);
                            if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
                                // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
                                // the parent's data that it will need later (same layout requirement). the problem is that then
                                // we cannot free the tensor because the original address of the allocation is lost.
                                // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
                                // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
                                AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
                                node->view_src = view_src;
                                view_src_hn->n_views += 1;
                                init_view(galloc, node, false);
                                return;
                            }
                        } else {
                            AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                            node->view_src = parent;
                            p_hn->n_views += 1;
                            init_view(galloc, node, false);
                            return;
                        }
                    }
                }
            }
            ggml_tallocr_alloc(alloc, node);
        }
    }
}
static void free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
    ggml_tallocr_t alloc = node_tallocr(galloc, node);

    ggml_tallocr_free_tensor(alloc, node);
}
static void ggml_tallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * gf) {
    const int * parse_seq     = galloc->parse_seq;
    int         parse_seq_len = galloc->parse_seq_len;

    // count number of children and views
    for (int i = 0; i < gf->n_nodes; i++) {
        struct ggml_tensor * node = gf->nodes[i];

        if (ggml_is_view(node)) {
            struct ggml_tensor * view_src = node->view_src;
            hash_get(galloc, view_src)->n_views += 1;
            if (node->buffer == NULL && node->data != NULL) {
                // view of a pre-allocated tensor, didn't call init_view() yet
                init_view(galloc, node, true);
            }
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                break;
            }
            hash_get(galloc, parent)->n_children += 1;
            if (ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) {
                init_view(galloc, parent, true);
            }
        }
    }

    // allocate tensors
    // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
    int last_barrier_pos = 0;
    int n_nodes = parse_seq_len ? parse_seq_len : gf->n_nodes;

    for (int ind = 0; ind < n_nodes; ind++) {
        // allocate a node if there is no parse_seq or this is not a barrier
        if (parse_seq_len == 0 || parse_seq[ind] != -1) {
            int i = parse_seq_len ? parse_seq[ind] : ind;
            struct ggml_tensor * node = gf->nodes[i];

            // allocate parents (leafs)
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                allocate_node(galloc, parent);
            }

            // allocate node
            allocate_node(galloc, node);

            AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                AT_PRINTF("%s", parent->name);
                if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
                    AT_PRINTF(", ");
                }
            }
            AT_PRINTF("\n");
        }

        // update parents
        // update immediately if there is no parse_seq
        // update only at barriers if there is parse_seq
        if ((parse_seq_len == 0) || parse_seq[ind] == -1) {
            int update_start = parse_seq_len ? last_barrier_pos : ind;
            int update_end   = parse_seq_len ? ind              : ind + 1;
            for (int i = update_start; i < update_end; i++) {
                int node_i = parse_seq_len ? parse_seq[i] : i;
                struct ggml_tensor * node = gf->nodes[node_i];

                for (int j = 0; j < GGML_MAX_SRC; j++) {
                    struct ggml_tensor * parent = node->src[j];
                    if (parent == NULL) {
                        break;
                    }
                    struct hash_node * p_hn = hash_get(galloc, parent);
                    p_hn->n_children -= 1;

                    //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);

                    if (p_hn->n_children == 0 && p_hn->n_views == 0) {
                        if (ggml_is_view(parent)) {
                            struct ggml_tensor * view_src = parent->view_src;
                            struct hash_node * view_src_hn = hash_get(galloc, view_src);
                            view_src_hn->n_views -= 1;
                            AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
                            if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0) {
                                free_node(galloc, view_src);
                            }
                        } else {
                            free_node(galloc, parent);
                        }
                    }
                }
            }
            AT_PRINTF("\n");
            if (parse_seq_len) {
                last_barrier_pos = ind + 1;
            }
        }
    }
}
size_t ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, ggml_tallocr_t talloc, struct ggml_cgraph * graph) {
    size_t hash_size = graph->visited_hash_table.size;

    // check if the hash table is initialized and large enough
    if (galloc->hash_set.size < hash_size) {
        if (galloc->hash_set.keys != NULL) {
            free(galloc->hash_set.keys);
        }
        if (galloc->hash_values != NULL) {
            free(galloc->hash_values);
        }
        galloc->hash_set.keys = malloc(sizeof(struct ggml_tensor *) * hash_size);
        galloc->hash_set.size = hash_size;
        galloc->hash_values   = malloc(sizeof(struct hash_node) * hash_size);
    }

    // reset hash table
    memset(galloc->hash_set.keys, 0, sizeof(struct ggml_tensor *) * hash_size);
    memset(galloc->hash_values,   0, sizeof(struct hash_node) * hash_size);

    galloc->talloc = talloc;
    ggml_tallocr_alloc_graph_impl(galloc, graph);
    galloc->talloc = NULL;

    size_t max_size = ggml_tallocr_max_size(talloc);

    return max_size;
}
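// Usage sketch: one gallocr can be reused across graphs; the tallocr supplies
// the actual memory (or measures it). The return value is the peak usage as
// reported by ggml_tallocr_max_size, including its 10% margin ('backend',
// 'buf_size' and 'graph' are assumptions for illustration):
//
//     ggml_gallocr_t ga = ggml_gallocr_new();
//     ggml_tallocr_t ta = ggml_tallocr_new_from_backend(backend, buf_size);
//     size_t used = ggml_gallocr_alloc_graph(ga, ta, graph);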
void ggml_gallocr_alloc_graph_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, struct ggml_hash_set hash_set, ggml_tallocr_t * hash_node_talloc) {
    const size_t hash_size = hash_set.size;

    GGML_ASSERT(hash_size >= (size_t)(graph->n_nodes + graph->n_leafs));

    galloc->talloc = NULL;

    // alloc hash_values if needed
    if (galloc->hash_values == NULL || galloc->hash_values_size < hash_size) {
        free(galloc->hash_values);
        galloc->hash_values      = malloc(sizeof(struct hash_node) * hash_size);
        galloc->hash_values_size = hash_size;
    }

    // free hash_set.keys if needed
    if (galloc->hash_set.keys != NULL) {
        free(galloc->hash_set.keys);
    }
    galloc->hash_set = hash_set;

    // reset hash values
    memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);

    galloc->hash_allocs = hash_node_talloc;

    ggml_tallocr_alloc_graph_impl(galloc, graph);

    // remove unowned resources
    galloc->hash_set.keys = NULL;
    galloc->hash_allocs   = NULL;
}
// legacy API wrapper

struct ggml_allocr {
    ggml_tallocr_t talloc;
    ggml_gallocr_t galloc;
};

static ggml_allocr_t ggml_allocr_new_impl(ggml_tallocr_t talloc) {
    ggml_allocr_t alloc = (ggml_allocr_t)malloc(sizeof(struct ggml_allocr));
    *alloc = (struct ggml_allocr) {
        /*.talloc = */ talloc,
        /*.galloc = */ ggml_gallocr_new(),
    };
    return alloc;
}

ggml_allocr_t ggml_allocr_new(void * data, size_t size, size_t alignment) {
    return ggml_allocr_new_impl(ggml_tallocr_new(data, size, alignment));
}

ggml_allocr_t ggml_allocr_new_measure(size_t alignment) {
    return ggml_allocr_new_impl(ggml_tallocr_new_measure(alignment));
}

ggml_allocr_t ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer) {
    return ggml_allocr_new_impl(ggml_tallocr_new_from_buffer(buffer));
}

ggml_allocr_t ggml_allocr_new_from_backend(struct ggml_backend * backend, size_t size) {
    return ggml_allocr_new_impl(ggml_tallocr_new_from_backend(backend, size));
}

ggml_allocr_t ggml_allocr_new_measure_from_backend(struct ggml_backend * backend) {
    return ggml_allocr_new_impl(ggml_tallocr_new_measure_from_backend(backend));
}

struct ggml_backend_buffer * ggml_allocr_get_buffer(ggml_allocr_t alloc) {
    return ggml_tallocr_get_buffer(alloc->talloc);
}

void ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n) {
    ggml_gallocr_set_parse_seq(alloc->galloc, list, n);
}

void ggml_allocr_free(ggml_allocr_t alloc) {
    if (alloc == NULL) {
        return;
    }

    ggml_gallocr_free(alloc->galloc);
    ggml_tallocr_free(alloc->talloc);
    free(alloc);
}

bool ggml_allocr_is_measure(ggml_allocr_t alloc) {
    return ggml_tallocr_is_measure(alloc->talloc);
}

void ggml_allocr_reset(ggml_allocr_t alloc) {
    ggml_tallocr_reset(alloc->talloc);
}

void ggml_allocr_alloc(ggml_allocr_t alloc, struct ggml_tensor * tensor) {
    ggml_tallocr_alloc(alloc->talloc, tensor);
}

size_t ggml_allocr_max_size(ggml_allocr_t alloc) {
    return ggml_tallocr_max_size(alloc->talloc);
}

size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph * graph) {
    return ggml_gallocr_alloc_graph(alloc->galloc, alloc->talloc, graph);
}
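// End-to-end sketch with the legacy API, following the usual
// measure-then-allocate pattern; build_graph() is a placeholder for user code
// that constructs the same graph twice, since the measure pass leaves fake
// data pointers behind:
//
//     ggml_allocr_t m = ggml_allocr_new_measure(32);
//     size_t mem_size = ggml_allocr_alloc_graph(m, build_graph());
//     ggml_allocr_free(m);
//
//     void * buf = malloc(mem_size);
//     ggml_allocr_t a = ggml_allocr_new(buf, mem_size, 32);
//     ggml_allocr_alloc_graph(a, build_graph());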
// utils

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(ggml_get_no_alloc(ctx) == true);

    size_t alignment = ggml_backend_buft_get_alignment(buft);

    size_t nbytes = 0;
    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        if (t->data == NULL && t->view_src == NULL) {
            nbytes += GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
        }
    }

    if (nbytes == 0) {
        // all the tensors in the context are already allocated
#ifndef NDEBUG
        fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__);
#endif
        return NULL;
    }

    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, nbytes);
    if (buffer == NULL) {
        // failed to allocate buffer
#ifndef NDEBUG
        fprintf(stderr, "%s: failed to allocate buffer\n", __func__);
#endif
        return NULL;
    }

    ggml_tallocr_t tallocr = ggml_tallocr_new_from_buffer(buffer);

    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        if (t->data == NULL) {
            if (t->view_src == NULL) {
                ggml_tallocr_alloc(tallocr, t);
            } else {
                ggml_backend_view_init(buffer, t);
            }
        } else {
            if (t->view_src != NULL) {
                // view of a pre-allocated tensor
                ggml_backend_view_init(buffer, t);
            }
        }
    }

    ggml_tallocr_free(tallocr);

    return buffer;
}
ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) {
    return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend));
}
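// Usage sketch: allocate every tensor of a no_alloc context in one backend
// buffer at once, the typical pattern for model weights ('ctx' and 'backend'
// are assumed to exist; error handling omitted):
//
//     ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//     if (buf == NULL) {
//         // nothing to allocate, or the backend allocation failed
//     }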