#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you wish for some specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph gf = ggml_build_forward(f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute_with_ctx(ctx, &gf, n_threads);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory
// buffer and, after defining the computation graph, call the ggml_used_mem() function to find out how much memory
// was actually needed.
//
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach allows the user to define the function graph once and then compute its forward or backward
// passes multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This
// way the user can avoid the memory allocation overhead at runtime.
//
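// For example, a minimal sketch of computing the gradient df/dx for the function defined above (x must have been
// marked with ggml_set_param(); this follows the pattern used in the ggml test programs):
//
//   {
//       struct ggml_cgraph gf = ggml_build_forward (f);
//       struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
//
//       ggml_graph_reset(&gf);       // zero all gradients
//       ggml_set_f32(f->grad, 1.0f); // seed df/df = 1
//
//       ggml_graph_compute_with_ctx(ctx, &gb, n_threads);
//
//       printf("df/dx = %f\n", ggml_get_f32_1d(x->grad, 0));
//   }
//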
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated by the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_1s()
//  - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This makes it
// possible to store tensors that are not contiguous in memory, which is useful for operations such as transposition
// and permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3);
//
//       // a[2, 1] = 1.0f;
//       *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f;
//
//       // a[0, 2] = 2.0f;
//       *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f;
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d(), that can be used.
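//
// For example, a minimal sketch using the 1d helpers on the tensor "a" from above (for a contiguous tensor, the
// 1d index is the flat row-major element index):
//
//   {
//       ggml_set_f32_1d(a, 0, 1.0f);
//
//       const float v = ggml_get_f32_1d(a, 0); // v == 1.0f
//   }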
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

// TODO: support for clang
#ifdef __GNUC__
#    define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define GGML_DEPRECATED(func, hint) func
#endif
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>  // fprintf() is used by GGML_ASSERT
#include <stdlib.h> // abort() is used by GGML_ASSERT

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_QNT_VERSION        2    // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this

#define GGML_MAX_DIMS          4
#define GGML_MAX_NODES         4096
#define GGML_MAX_PARAMS        256
#define GGML_MAX_CONTEXTS      64
#define GGML_MAX_SRC           6
#define GGML_MAX_NAME          64
#define GGML_MAX_OP_PARAMS     32
#define GGML_DEFAULT_N_THREADS 4

#define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1

#define GGUF_MAGIC   0x46554747 // "GGUF"
#define GGUF_VERSION 1

#define GGUF_DEFAULT_ALIGNMENT 32

#define GGML_UNUSED(x) (void)(x)

#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
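// note: n must be a power of two; e.g. GGML_PAD(13, 8) == 16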

#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)

// used to copy the number of elements and the stride in bytes of tensors into local variables.
// the main purpose is to reduce code duplication and improve readability.
//
// example:
//
//    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
//    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
//
#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
    const type prefix##0 = (pointer)->array[0]; \
    GGML_UNUSED(prefix##0);
#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
    const type prefix##1 = (pointer)->array[1]; \
    GGML_UNUSED(prefix##1);
#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
    const type prefix##2 = (pointer)->array[2]; \
    GGML_UNUSED(prefix##2);
#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
    const type prefix##3 = (pointer)->array[3]; \
    GGML_UNUSED(prefix##3);

#ifdef  __cplusplus
extern "C" {
#endif

#ifdef __ARM_NEON
    // we use the built-in 16-bit float type
    typedef __fp16 ggml_fp16_t;
#else
    typedef uint16_t ggml_fp16_t;
#endif

    // convert FP16 <-> FP32
    GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
    GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);
    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);
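
    // for example, a minimal sketch (assuming src holds n floats and dst has room for n half-precision values):
    //
    //   {
    //       ggml_fp32_to_fp16_row(src, dst, n); // FP32 -> FP16
    //       ggml_fp16_to_fp32_row(dst, src, n); // FP16 -> FP32 (round-trips, up to FP16 precision loss)
    //   }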

    struct ggml_object;
    struct ggml_context;

    enum ggml_type {
        GGML_TYPE_F32  = 0,
        GGML_TYPE_F16  = 1,
        GGML_TYPE_Q4_0 = 2,
        GGML_TYPE_Q4_1 = 3,
        // GGML_TYPE_Q4_2 = 4, support has been removed
        // GGML_TYPE_Q4_3 = 5, support has been removed
        GGML_TYPE_Q5_0 = 6,
        GGML_TYPE_Q5_1 = 7,
        GGML_TYPE_Q8_0 = 8,
        GGML_TYPE_Q8_1 = 9,
        // k-quantizations
        GGML_TYPE_Q2_K = 10,
        GGML_TYPE_Q3_K = 11,
        GGML_TYPE_Q4_K = 12,
        GGML_TYPE_Q5_K = 13,
        GGML_TYPE_Q6_K = 14,
        GGML_TYPE_Q8_K = 15,
        GGML_TYPE_I8,
        GGML_TYPE_I16,
        GGML_TYPE_I32,
        GGML_TYPE_COUNT,
    };

    enum ggml_backend {
        GGML_BACKEND_CPU       = 0,
        GGML_BACKEND_GPU       = 10,
        GGML_BACKEND_GPU_SPLIT = 20,
    };

    // model file types
    enum ggml_ftype {
        GGML_FTYPE_UNKNOWN              = -1,
        GGML_FTYPE_ALL_F32              = 0,
        GGML_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
        GGML_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q3_K          = 11, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_K          = 12, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_K          = 13, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q6_K          = 14, // except 1d tensors
    };

    // available tensor operations:
    enum ggml_op {
        GGML_OP_NONE = 0,

        GGML_OP_DUP,
        GGML_OP_ADD,
        GGML_OP_ADD1,
        GGML_OP_ACC,
        GGML_OP_SUB,
        GGML_OP_MUL,
        GGML_OP_DIV,
        GGML_OP_SQR,
        GGML_OP_SQRT,
        GGML_OP_LOG,
        GGML_OP_SUM,
        GGML_OP_SUM_ROWS,
        GGML_OP_MEAN,
        GGML_OP_ARGMAX,
        GGML_OP_REPEAT,
        GGML_OP_REPEAT_BACK,
        GGML_OP_SILU_BACK,
        GGML_OP_NORM, // normalize
        GGML_OP_RMS_NORM,
        GGML_OP_RMS_NORM_BACK,

        GGML_OP_MUL_MAT,
        GGML_OP_OUT_PROD,

        GGML_OP_SCALE,
        GGML_OP_SET,
        GGML_OP_CPY,
        GGML_OP_CONT,
        GGML_OP_RESHAPE,
        GGML_OP_VIEW,
        GGML_OP_PERMUTE,
        GGML_OP_TRANSPOSE,
        GGML_OP_GET_ROWS,
        GGML_OP_GET_ROWS_BACK,
        GGML_OP_DIAG,
        GGML_OP_DIAG_MASK_INF,
        GGML_OP_DIAG_MASK_ZERO,
        GGML_OP_SOFT_MAX,
        GGML_OP_SOFT_MAX_BACK,
        GGML_OP_ROPE,
        GGML_OP_ROPE_BACK,
        GGML_OP_ALIBI,
        GGML_OP_CLAMP,
        GGML_OP_CONV_1D,
        GGML_OP_CONV_2D,
        GGML_OP_POOL_1D,
        GGML_OP_POOL_2D,

        GGML_OP_FLASH_ATTN,
        GGML_OP_FLASH_FF,
        GGML_OP_FLASH_ATTN_BACK,
        GGML_OP_WIN_PART,
        GGML_OP_WIN_UNPART,

        GGML_OP_UNARY,

        GGML_OP_MAP_UNARY,
        GGML_OP_MAP_BINARY,

        GGML_OP_MAP_CUSTOM1_F32,
        GGML_OP_MAP_CUSTOM2_F32,
        GGML_OP_MAP_CUSTOM3_F32,

        GGML_OP_MAP_CUSTOM1,
        GGML_OP_MAP_CUSTOM2,
        GGML_OP_MAP_CUSTOM3,

        GGML_OP_CROSS_ENTROPY_LOSS,
        GGML_OP_CROSS_ENTROPY_LOSS_BACK,

        GGML_OP_COUNT,
    };

    enum ggml_unary_op {
        GGML_UNARY_OP_ABS,
        GGML_UNARY_OP_SGN,
        GGML_UNARY_OP_NEG,
        GGML_UNARY_OP_STEP,
        GGML_UNARY_OP_TANH,
        GGML_UNARY_OP_ELU,
        GGML_UNARY_OP_RELU,
        GGML_UNARY_OP_GELU,
        GGML_UNARY_OP_GELU_QUICK,
        GGML_UNARY_OP_SILU,
    };

    enum ggml_object_type {
        GGML_OBJECT_TENSOR,
        GGML_OBJECT_GRAPH,
        GGML_OBJECT_WORK_BUFFER
    };

    // ggml object
    struct ggml_object {
        size_t offs;
        size_t size;

        struct ggml_object * next;

        enum ggml_object_type type;

        char padding[4];
    };

    static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

    // n-dimensional tensor
    struct ggml_tensor {
        enum ggml_type    type;
        enum ggml_backend backend;

        int     n_dims;
        int64_t ne[GGML_MAX_DIMS]; // number of elements
        size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                                   // nb[0] = sizeof(type)
                                   // nb[1] = nb[0] * ne[0] + padding
                                   // nb[i] = nb[i-1] * ne[i-1]
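                                   //
                                   // for example, a contiguous F32 tensor with ne = [3, 4, 1, 1] has:
                                   //   nb[0] = 4  (sizeof(float))
                                   //   nb[1] = 12 (nb[0]*ne[0], no padding for F32)
                                   //   nb[2] = 48
                                   //   nb[3] = 48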

        // compute data
        enum ggml_op op;

        // op params - allocated as int32_t for alignment
        int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];

        bool is_param;

        struct ggml_tensor * grad;
        struct ggml_tensor * src[GGML_MAX_SRC];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;

        void * data;

        char name[GGML_MAX_NAME];

        void * extra; // extra things e.g. for ggml-cuda.cu

        char padding[4];
    };

    static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);

    // the compute plan that needs to be prepared for ggml_graph_compute()
    // since https://github.com/ggerganov/ggml/issues/287
    struct ggml_cplan {
        size_t    work_size; // size of work buffer, calculated by `ggml_graph_plan()`
        uint8_t * work_data; // work buffer, to be allocated by the caller before calling `ggml_graph_compute()`

        int n_threads;

        // the `n_tasks` of nodes, 1:1 mapping to cgraph nodes
        int n_tasks[GGML_MAX_NODES];

        // abort ggml_graph_compute when true
        bool (*abort_callback)(void * data);
        void * abort_callback_data;
    };

    // next prime after GGML_MAX_NODES
    // #define GGML_GRAPH_HASHTABLE_SIZE 4099
    // next prime after GGML_MAX_NODES * 2 (nodes + leafs)
    #define GGML_GRAPH_HASHTABLE_SIZE 8273

    // computation graph
    struct ggml_cgraph {
        int n_nodes;
        int n_leafs;

        struct ggml_tensor * nodes[GGML_MAX_NODES];
        struct ggml_tensor * grads[GGML_MAX_NODES];
        struct ggml_tensor * leafs[GGML_MAX_NODES];

        void * visited_hash_table[GGML_GRAPH_HASHTABLE_SIZE];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;
    };

    static const size_t GGML_GRAPH_SIZE = sizeof(struct ggml_cgraph);

    // scratch buffer
    struct ggml_scratch {
        size_t offs;
        size_t size;
        void * data;
    };

    struct ggml_init_params {
        // memory pool
        size_t mem_size;   // bytes
        void * mem_buffer; // if NULL, memory will be allocated internally
        bool   no_alloc;   // don't allocate memory for the tensor data
    };
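
    // for example, a minimal sketch of a context lifecycle (the 16 MB size is just an illustrative value):
    //
    //   {
    //       struct ggml_init_params params = {
    //           .mem_size   = 16*1024*1024,
    //           .mem_buffer = NULL, // allocated internally
    //           .no_alloc   = false,
    //       };
    //
    //       struct ggml_context * ctx = ggml_init(params);
    //
    //       // ... create tensors, build graphs, compute ...
    //
    //       printf("used: %zu bytes\n", ggml_used_mem(ctx));
    //
    //       ggml_free(ctx);
    //   }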

    // compute types

    // NOTE: the INIT or FINALIZE pass is not scheduled unless explicitly enabled.
    // This behavior was changed in https://github.com/ggerganov/llama.cpp/pull/1995.
    enum ggml_task_type {
        GGML_TASK_INIT = 0,
        GGML_TASK_COMPUTE,
        GGML_TASK_FINALIZE,
    };

    struct ggml_compute_params {
        enum ggml_task_type type;

        // ith = thread index, nth = number of threads
        int ith, nth;

        // work buffer for all threads
        size_t wsize;
        void * wdata;
    };

    // misc

    GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
    GGML_API int64_t ggml_time_ms(void);
    GGML_API int64_t ggml_time_us(void);
    GGML_API int64_t ggml_cycles(void);
    GGML_API int64_t ggml_cycles_per_ms(void);

    GGML_API void    ggml_numa_init(void); // call once for better performance on NUMA systems
    GGML_API bool    ggml_is_numa(void);   // true if init detected that system has >1 NUMA node

    GGML_API void    ggml_print_object (const struct ggml_object * obj);
    GGML_API void    ggml_print_objects(const struct ggml_context * ctx);

    GGML_API int64_t ggml_nelements   (const struct ggml_tensor * tensor);
    GGML_API int64_t ggml_nrows       (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes      (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes_pad  (const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN
    GGML_API size_t  ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split);

    GGML_API int     ggml_blck_size (enum ggml_type type);
    GGML_API size_t  ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
    GGML_API float   ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float

    GGML_API const char * ggml_type_name(enum ggml_type type);
    GGML_API const char * ggml_op_name  (enum ggml_op   op);
    GGML_API const char * ggml_op_symbol(enum ggml_op   op);

    GGML_API size_t  ggml_element_size(const struct ggml_tensor * tensor);

    GGML_API bool    ggml_is_quantized(enum ggml_type type);

    // TODO: temporary until model loading of ggml examples is refactored
    GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

    GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_permuted  (const struct ggml_tensor * tensor);

    GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1);

    // use this to compute the memory overhead of a tensor
    GGML_API size_t ggml_tensor_overhead(void);

    // main

    GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
    GGML_API void                  ggml_free(struct ggml_context * ctx);

    GGML_API size_t  ggml_used_mem(const struct ggml_context * ctx);

    GGML_API size_t  ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
    GGML_API bool    ggml_get_no_alloc(struct ggml_context * ctx);
    GGML_API void    ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

    GGML_API void *  ggml_get_mem_buffer     (const struct ggml_context * ctx);
    GGML_API size_t  ggml_get_mem_size       (const struct ggml_context * ctx);
    GGML_API size_t  ggml_get_max_tensor_size(const struct ggml_context * ctx);

    GGML_API struct ggml_tensor * ggml_new_tensor(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int                   n_dims,
            const int64_t       * ne);

    GGML_API struct ggml_tensor * ggml_new_tensor_1d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_new_tensor_2d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1);

    GGML_API struct ggml_tensor * ggml_new_tensor_3d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_new_tensor_4d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
    GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

    GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
    GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);

    GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);

    GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

    GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

    GGML_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

    GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
    GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

    GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);

    GGML_API const char *         ggml_get_name   (const struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_name   (      struct ggml_tensor * tensor, const char * name);
    GGML_API struct ggml_tensor * ggml_format_name(      struct ggml_tensor * tensor, const char * fmt, ...);

    //
    // operations on tensors with backpropagation
    //

    GGML_API struct ggml_tensor * ggml_dup(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_dup_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_add(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_acc(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_acc_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_sub(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sub_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sqr(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqr_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return scalar
    GGML_API struct ggml_tensor * ggml_sum(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // sums along rows: an input of shape [a,b,c,d] returns shape [1,b,c,d]
    GGML_API struct ggml_tensor * ggml_sum_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // mean along rows
    GGML_API struct ggml_tensor * ggml_mean(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // argmax along rows
    GGML_API struct ggml_tensor * ggml_argmax(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // if a is the same shape as b, and a is not a parameter, return a
    // otherwise, return a new tensor: repeat(a) to fit in b
    GGML_API struct ggml_tensor * ggml_repeat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
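
    // for example, a minimal sketch of a common use: broadcasting a bias tensor b across a matrix m
    // (the shapes here are illustrative):
    //
    //   {
    //       struct ggml_tensor * m = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // ne = [4, 3]
    //       struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);    // ne = [4]
    //
    //       struct ggml_tensor * y = ggml_add(ctx, m, ggml_repeat(ctx, b, m));
    //   }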

    GGML_API struct ggml_tensor * ggml_repeat_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_abs(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_abs_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // TODO: double-check this computation is correct
    GGML_API struct ggml_tensor * ggml_gelu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_silu_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // normalize along rows
    // TODO: eps is hardcoded to 1e-5 for now
    GGML_API struct ggml_tensor * ggml_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_rms_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    // a - x
    // b - dy
    // TODO: update with configurable eps
    GGML_API struct ggml_tensor * ggml_rms_norm_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // A: n columns, m rows
    // B: n columns, p rows (i.e. we transpose it internally)
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_mul_mat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
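
    // for example, a minimal sketch of the shape rule above (ne[0] is the number of columns):
    //
    //   {
    //       struct ggml_tensor * A = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 2); // 4 columns, 2 rows
    //       struct ggml_tensor * B = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // 4 columns, 3 rows
    //
    //       struct ggml_tensor * C = ggml_mul_mat(ctx, A, B); // 2 columns, 3 rows, i.e. ne = [2, 3]
    //   }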

    // A: m columns, n rows,
    // B: p columns, n rows,
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_out_prod(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    //
    // operations on tensors without backpropagation
    //

    GGML_API struct ggml_tensor * ggml_scale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_scale_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // b -> view(a,offset,nb1,nb2,nb3), return modified a
    GGML_API struct ggml_tensor * ggml_set(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    // b -> view(a,offset,nb1,nb2,nb3), return view(a)
    GGML_API struct ggml_tensor * ggml_set_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    // b -> view(a,offset,nb1), return modified a
    GGML_API struct ggml_tensor * ggml_set_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // b -> view(a,offset,nb1), return view(a)
    GGML_API struct ggml_tensor * ggml_set_2d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // a -> b, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // a -> b, in-place, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // make contiguous
    GGML_API struct ggml_tensor * ggml_cont(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // make contiguous, in-place
    GGML_API struct ggml_tensor * ggml_cont_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return view(a), b specifies the new shape
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_reshape_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_reshape_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    // offset in bytes
    GGML_API struct ggml_tensor * ggml_view_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            size_t                nb1, // row stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            size_t                nb1, // row stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3,
            size_t                nb1, // row stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                nb3,
            size_t                offset);
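
    // for example, a minimal sketch viewing the second row of a 2d tensor as a 1d tensor:
    //
    //   {
    //       struct ggml_tensor * a   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // ne = [4, 3]
    //       struct ggml_tensor * row = ggml_view_1d(ctx, a, 4, 1*a->nb[1]);          // offset in bytes
    //   }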

    GGML_API struct ggml_tensor * ggml_permute(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   axis0,
            int                   axis1,
            int                   axis2,
            int                   axis3);

    // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
    GGML_API struct ggml_tensor * ggml_transpose(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);
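
    // note that ggml_permute() and ggml_transpose() only rearrange the strides ("nb") - the data is not moved.
    // a minimal sketch of making the result contiguous when a subsequent op requires it:
    //
    //   {
    //       struct ggml_tensor * at = ggml_cont(ctx, ggml_transpose(ctx, a));
    //   }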

    GGML_API struct ggml_tensor * ggml_get_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_get_rows_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    GGML_API struct ggml_tensor * ggml_diag(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // set elements above the diagonal to -INF
    GGML_API struct ggml_tensor * ggml_diag_mask_inf(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // set elements above the diagonal to 0
    GGML_API struct ggml_tensor * ggml_diag_mask_zero(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    GGML_API struct ggml_tensor * ggml_soft_max(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_soft_max_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // rotary position embedding
    // if mode & 1 != 0, skip n_past elements
    // if mode & 2 != 0, GPT-NeoX style
    // if mode & 4 != 0, ChatGLM style
    // TODO: avoid creating a new tensor every time
    GGML_API struct ggml_tensor * ggml_rope(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // custom RoPE
    GGML_API struct ggml_tensor * ggml_rope_custom(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            float                 freq_base,
            float                 freq_scale);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            float                 freq_base,
            float                 freq_scale);

    // rotary position embedding backward, i.e. compute dx from dy
    // a - dy
    GGML_API struct ggml_tensor * ggml_rope_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // alibi position embedding
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_alibi(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_head,
            float                 bias_max);

    // clamp
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_clamp(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 min,
            float                 max);

    GGML_API struct ggml_tensor * ggml_conv_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,  // stride
            int                   p0,  // padding
            int                   d0); // dilation

    GGML_API struct ggml_tensor * ggml_conv_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1);

    // conv_1d with padding = half
    // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
    GGML_API struct ggml_tensor * ggml_conv_1d_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s,
            int                   d);

    enum ggml_op_pool {
        GGML_OP_POOL_MAX,
        GGML_OP_POOL_AVG,
        GGML_OP_POOL_COUNT,
    };

    GGML_API struct ggml_tensor * ggml_pool_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0,  // kernel size
            int                   s0,  // stride
            int                   p0); // padding

    GGML_API struct ggml_tensor * ggml_pool_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0,
            int                   k1,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1);

    GGML_API struct ggml_tensor * ggml_flash_attn(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_attn_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * d,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_ff(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b0,
            struct ggml_tensor  * b1,
            struct ggml_tensor  * c0,
            struct ggml_tensor  * c1);

    // partition into non-overlapping windows with padding if needed
    // example:
    //   a:   768 64 64 1
    //   w:   14
    //   res: 768 14 14 25
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_part(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w);

    // reverse of ggml_win_part
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_unpart(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w0,
            int                   h0,
            int                   w);

    GGML_API struct ggml_tensor * ggml_unary(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    GGML_API struct ggml_tensor * ggml_unary_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    // custom operators

    typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
    typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

    typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            ggml_custom1_op_f32_t   fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            ggml_custom1_op_f32_t   fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            ggml_custom2_op_f32_t   fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            ggml_custom2_op_f32_t   fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            struct ggml_tensor    * c,
            ggml_custom3_op_f32_t   fun),
        "use ggml_map_custom3 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            struct ggml_tensor    * c,
            ggml_custom3_op_f32_t   fun),
        "use ggml_map_custom3_inplace instead");

    // custom operators v2

    typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, int ith, int nth, void * userdata);
    typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
    typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);

    #define GGML_N_TASKS_MAX -1

    GGML_API struct ggml_tensor * ggml_map_custom1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom2(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom3(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_t     fun,
            int                   n_tasks,
            void                * userdata);
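
    // for example, a minimal sketch of a custom unary op (the helper my_relu is hypothetical; it assumes
    // dst and a are contiguous F32 tensors of the same shape):
    //
    //   static void my_relu(struct ggml_tensor * dst, const struct ggml_tensor * a,
    //                       int ith, int nth, void * userdata) {
    //       const int64_t n = ggml_nelements(dst);
    //       const float * x = (const float *) a->data;
    //       float       * y = (float *) dst->data;
    //
    //       // each of the nth threads handles a strided subset of the elements
    //       for (int64_t i = ith; i < n; i += nth) {
    //           y[i] = x[i] > 0.0f ? x[i] : 0.0f;
    //       }
    //   }
    //
    //   ...
    //
    //   struct ggml_tensor * r = ggml_map_custom1(ctx, a, my_relu, GGML_N_TASKS_MAX, NULL);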

    // loss function

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    //
    // automatic differentiation
    //

    GGML_API void ggml_set_param(
            struct ggml_context * ctx,
            struct ggml_tensor  * tensor);

    GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);

    GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
    GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);

    // graph allocation in a context
    GGML_API struct ggml_cgraph * ggml_new_graph        (struct ggml_context * ctx);
    GGML_API struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor);
    GGML_API size_t ggml_graph_overhead(void);

    // ggml_graph_plan() has to be called before ggml_graph_compute()
    // when plan.work_size > 0, the caller must allocate memory for plan.work_data
    GGML_API struct ggml_cplan ggml_graph_plan   (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
    GGML_API int               ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
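
    // for example, a minimal sketch of the plan/compute workflow (assuming a graph gf built as above):
    //
    //   {
    //       struct ggml_cplan cplan = ggml_graph_plan(&gf, GGML_DEFAULT_N_THREADS);
    //
    //       uint8_t * work = NULL;
    //       if (cplan.work_size > 0) {
    //           work = malloc(cplan.work_size);
    //           cplan.work_data = work;
    //       }
    //
    //       ggml_graph_compute(&gf, &cplan);
    //
    //       free(work);
    //   }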
  1272. GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph);
  1273. // same as ggml_graph_compute() but the work data is allocated as a part of the context
  1274. // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
  1275. GGML_API void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);
    GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);

    GGML_API void               ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
    GGML_API struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);

    // print info and performance information for the graph
    GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

    // dump the graph into a file using the dot format
    GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
    //
    // optimization
    //

    // optimization methods
    enum ggml_opt_type {
        GGML_OPT_ADAM,
        GGML_OPT_LBFGS,
    };

    // linesearch methods
    enum ggml_linesearch {
        GGML_LINESEARCH_DEFAULT = 1,

        GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
        GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
        GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
    };

    // optimization return values
    enum ggml_opt_result {
        GGML_OPT_OK = 0,
        GGML_OPT_DID_NOT_CONVERGE,
        GGML_OPT_NO_CONTEXT,
        GGML_OPT_INVALID_WOLFE,
        GGML_OPT_FAIL,

        GGML_LINESEARCH_FAIL = -128,
        GGML_LINESEARCH_MINIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_ITERATIONS,
        GGML_LINESEARCH_INVALID_PARAMETERS,
    };
    // optimization parameters
    //
    //   see ggml.c (ggml_opt_default_params) for default values
    //
    struct ggml_opt_params {
        enum ggml_opt_type type;

        int n_threads;

        // delta-based convergence test
        //
        //   if past == 0 - disabled
        //   if past > 0:
        //     stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
        //
        int past;
        float delta;

        // maximum number of iterations without improvement
        //
        //   if 0 - disabled
        //   if > 0:
        //     assume convergence if no cost improvement in this number of iterations
        //
        int max_no_improvement;

        bool print_forward_graph;
        bool print_backward_graph;

        // ADAM parameters
        struct {
            int n_iter;

            float sched; // schedule multiplier (fixed, decay or warmup)
            float decay; // weight decay for AdamW, use 0.0f to disable
            float alpha; // learning rate
            float beta1;
            float beta2;
            float eps;   // epsilon for numerical stability
            float eps_f; // epsilon for convergence test
            float eps_g; // epsilon for convergence test
        } adam;

        // LBFGS parameters
        struct {
            int m; // number of corrections to approximate the inv. Hessian
            int n_iter;
            int max_linesearch;

            float eps;      // convergence tolerance
            float ftol;     // line search tolerance
            float wolfe;
            float min_step;
            float max_step;

            enum ggml_linesearch linesearch;
        } lbfgs;
    };
    struct ggml_opt_context {
        struct ggml_context * ctx;
        struct ggml_opt_params params;

        int iter;
        int64_t nx; // number of parameter elements

        bool just_initialized;

        struct {
            struct ggml_tensor * x;  // view of the parameters
            struct ggml_tensor * g1; // gradient
            struct ggml_tensor * g2; // gradient squared
            struct ggml_tensor * m;  // first moment
            struct ggml_tensor * v;  // second moment
            struct ggml_tensor * mh; // first moment hat
            struct ggml_tensor * vh; // second moment hat
            struct ggml_tensor * pf; // past function values
            float fx_best;
            float fx_prev;
            int n_no_improvement;
        } adam;

        struct {
            struct ggml_tensor * x;    // current parameters
            struct ggml_tensor * xp;   // previous parameters
            struct ggml_tensor * g;    // current gradient
            struct ggml_tensor * gp;   // previous gradient
            struct ggml_tensor * d;    // search direction
            struct ggml_tensor * pf;   // past function values
            struct ggml_tensor * lmal; // the L-BFGS memory alpha
            struct ggml_tensor * lmys; // the L-BFGS memory ys
            struct ggml_tensor * lms;  // the L-BFGS memory s
            struct ggml_tensor * lmy;  // the L-BFGS memory y
            float fx_best;
            float step;
            int j;
            int k;
            int end;
            int n_no_improvement;
        } lbfgs;
    };
    GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

    // optimize the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt(
            struct ggml_context * ctx,
            struct ggml_opt_params params,
            struct ggml_tensor * f);
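
    // minimal usage sketch (f is a placeholder for a scalar loss built from
    // tensors previously marked with ggml_set_param):
    //
    //   struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_ADAM);
    //   params.adam.n_iter = 100;
    //   enum ggml_opt_result res = ggml_opt(ctx, params, f);
    //   // res == GGML_OPT_OK on convergence; other values indicate why it stopped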
    // initialize optimizer context
    GGML_API void ggml_opt_init(
            struct ggml_context     * ctx,
            struct ggml_opt_context * opt,
            struct ggml_opt_params    params,
            int64_t                   nx);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f);

    // same as ggml_opt_resume(), but using caller-provided forward and backward graphs
    GGML_API enum ggml_opt_result ggml_opt_resume_g(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f,
            struct ggml_cgraph * gf,
            struct ggml_cgraph * gb);
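
    // resumable optimization sketch: a persistent ggml_opt_context carries the
    // optimizer state (Adam moments, L-BFGS memory) across calls; x and f are
    // placeholders for the caller's parameter tensor and scalar loss:
    //
    //   struct ggml_opt_context opt;
    //   ggml_opt_init(ctx, &opt, params, ggml_nelements(x)); // nx = number of parameter elements
    //   ggml_opt_resume(ctx, &opt, f); // first round
    //   ggml_opt_resume(ctx, &opt, f); // continues from the previous state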
    //
    // quantization
    //

    GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);

    GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
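
    // quantization sketch for q4_0 (n is the total number of floats, k the row
    // length - a multiple of the block size; hist receives a histogram of the
    // quantized values, 1 << 4 bins here being an assumption tied to the 4-bit
    // type). the return value is the number of bytes written to dst:
    //
    //   int64_t hist[1 << 4] = {0};
    //   size_t  size = ggml_quantize_q4_0(src, dst, n, k, hist);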
    //
    // gguf
    //

    enum gguf_type {
        GGUF_TYPE_UINT8   = 0,
        GGUF_TYPE_INT8    = 1,
        GGUF_TYPE_UINT16  = 2,
        GGUF_TYPE_INT16   = 3,
        GGUF_TYPE_UINT32  = 4,
        GGUF_TYPE_INT32   = 5,
        GGUF_TYPE_FLOAT32 = 6,
        GGUF_TYPE_BOOL    = 7,
        GGUF_TYPE_STRING  = 8,
        GGUF_TYPE_ARRAY   = 9,
        GGUF_TYPE_COUNT,  // marks the end of the enum
    };

    struct gguf_context;

    struct gguf_init_params {
        bool no_alloc;

        // if not NULL, create a ggml_context and allocate the tensor data in it
        struct ggml_context ** ctx;
    };

    GGML_API struct gguf_context * gguf_init_empty(void);
    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
    //GGML_API struct gguf_context * gguf_init_from_buffer(..);

    GGML_API void gguf_free(struct gguf_context * ctx);
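
    // read sketch: load a gguf file and let gguf allocate a ggml context
    // holding the tensor data ("model.gguf" is a placeholder path):
    //
    //   struct ggml_context * ctx_data = NULL;
    //
    //   struct gguf_init_params params = {
    //       /*.no_alloc =*/ false,
    //       /*.ctx      =*/ &ctx_data,
    //   };
    //
    //   struct gguf_context * gctx = gguf_init_from_file("model.gguf", params);
    //   // ... use the tensors in ctx_data ...
    //   gguf_free(gctx);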
    GGML_API const char * gguf_type_name(enum gguf_type type);

    GGML_API int    gguf_get_version    (struct gguf_context * ctx);
    GGML_API size_t gguf_get_alignment  (struct gguf_context * ctx);
    GGML_API size_t gguf_get_data_offset(struct gguf_context * ctx);
    GGML_API void * gguf_get_data       (struct gguf_context * ctx);

    GGML_API int          gguf_get_n_kv(struct gguf_context * ctx);
    GGML_API int          gguf_find_key(struct gguf_context * ctx, const char * key);
    GGML_API const char * gguf_get_key (struct gguf_context * ctx, int i);

    GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i);
    GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i);

    // results are undefined if the wrong type is used for the key
    GGML_API uint8_t      gguf_get_val_u8  (struct gguf_context * ctx, int i);
    GGML_API int8_t       gguf_get_val_i8  (struct gguf_context * ctx, int i);
    GGML_API uint16_t     gguf_get_val_u16 (struct gguf_context * ctx, int i);
    GGML_API int16_t      gguf_get_val_i16 (struct gguf_context * ctx, int i);
    GGML_API uint32_t     gguf_get_val_u32 (struct gguf_context * ctx, int i);
    GGML_API int32_t      gguf_get_val_i32 (struct gguf_context * ctx, int i);
    GGML_API float        gguf_get_val_f32 (struct gguf_context * ctx, int i);
    GGML_API bool         gguf_get_val_bool(struct gguf_context * ctx, int i);
    GGML_API const char * gguf_get_val_str (struct gguf_context * ctx, int i);
    GGML_API int          gguf_get_arr_n   (struct gguf_context * ctx, int i);
    GGML_API const void * gguf_get_arr_data(struct gguf_context * ctx, int i);
    GGML_API const char * gguf_get_arr_str (struct gguf_context * ctx, int key_id, int i);
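
    // kv lookup sketch: find the key, verify its type, then use the matching
    // getter ("general.name" is just an example key):
    //
    //   int id = gguf_find_key(gctx, "general.name"); // -1 if not found
    //   if (id >= 0 && gguf_get_kv_type(gctx, id) == GGUF_TYPE_STRING) {
    //       const char * name = gguf_get_val_str(gctx, id);
    //   }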
    GGML_API int    gguf_get_n_tensors    (struct gguf_context * ctx);
    GGML_API int    gguf_find_tensor      (struct gguf_context * ctx, const char * name);
    GGML_API size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i);
    GGML_API char * gguf_get_tensor_name  (struct gguf_context * ctx, int i);

    // overrides existing values or adds a new one
    GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t  val);
    GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t   val);
    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t  val);
    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t  val);
    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float    val);
    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool     val);
    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);

    // set or add KV pairs from another context
    GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);

    // manage tensor info
    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);
    // writing gguf files can be done in 2 ways:
    //
    // - write the entire gguf_context to a binary file in a single pass:
    //
    //   gguf_write_to_file(ctx, fname, /*only_meta =*/ false);
    //
    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
    //
    //   FILE * f = fopen(fname, "wb");
    //   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
    //   fwrite(tensor_data, 1, tensor_data_size, f); // the caller's tensor payload
    //   void * meta = malloc(gguf_get_meta_size(ctx));
    //   gguf_get_meta_data(ctx, meta);
    //   fseek(f, 0, SEEK_SET);
    //   fwrite(meta, 1, gguf_get_meta_size(ctx), f);
    //   free(meta);
    //   fclose(f);
    //

    // write the entire context to a binary file
    GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta);

    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
    GGML_API size_t gguf_get_meta_size(struct gguf_context * ctx);

    // copies the meta data into the caller-allocated buffer `data` (at least gguf_get_meta_size(ctx) bytes)
    GGML_API void   gguf_get_meta_data(struct gguf_context * ctx, void * data);
    //
    // system info
    //

    GGML_API int ggml_cpu_has_avx        (void);
    GGML_API int ggml_cpu_has_avx2       (void);
    GGML_API int ggml_cpu_has_avx512     (void);
    GGML_API int ggml_cpu_has_avx512_vbmi(void);
    GGML_API int ggml_cpu_has_avx512_vnni(void);
    GGML_API int ggml_cpu_has_fma        (void);
    GGML_API int ggml_cpu_has_neon       (void);
    GGML_API int ggml_cpu_has_arm_fma    (void);
    GGML_API int ggml_cpu_has_f16c       (void);
    GGML_API int ggml_cpu_has_fp16_va    (void);
    GGML_API int ggml_cpu_has_wasm_simd  (void);
    GGML_API int ggml_cpu_has_blas       (void);
    GGML_API int ggml_cpu_has_cublas    (void);
    GGML_API int ggml_cpu_has_clblast   (void);
    GGML_API int ggml_cpu_has_gpublas   (void);
    GGML_API int ggml_cpu_has_sse3      (void);
    GGML_API int ggml_cpu_has_vsx       (void);
    //
    // Internal types and functions exposed for tests and benchmarks
    //

#ifdef  __cplusplus
// restrict not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif
    typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int k);
    typedef void (*ggml_vec_dot_t)   (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

    typedef struct {
        const char      * type_name;
        int               blck_size;
        size_t            type_size;
        bool              is_quantized;
        ggml_to_float_t   to_float;
        ggml_from_float_t from_float;
        ggml_from_float_t from_float_reference;
        ggml_vec_dot_t    vec_dot;
        enum ggml_type    vec_dot_type;
    } ggml_type_traits_t;

    ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
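
    // sketch: dequantize one row of k elements back to f32 via the traits
    // table (dst must have room for k floats; src points to quantized data of
    // the given type):
    //
    //   ggml_type_traits_t tt = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
    //   tt.to_float(src, dst, k);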
#ifdef  __cplusplus
}
#endif