#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you would like specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph * gf = ggml_new_graph(ctx);
//       ggml_build_forward_expand(gf, f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute_with_ctx(ctx, gf, n_threads);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory
// buffer and, after defining the computation graph, call the ggml_used_mem() function to find out how much memory
// was actually needed.
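//
// For example, a rough two-pass pattern (a sketch, not a prescribed workflow; it assumes a
// deliberately generous first buffer):
//
//   {
//       // pass 1: define the graph in a large pool, then measure
//       struct ggml_context * ctx = ggml_init((struct ggml_init_params) {
//           .mem_size   = 256*1024*1024,
//           .mem_buffer = NULL,
//       });
//
//       ... define tensors and build the graph ...
//
//       const size_t mem_needed = ggml_used_mem(ctx);
//       ggml_free(ctx);
//
//       // pass 2: re-initialize with mem_needed (plus some headroom) for the real run
//   }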
//
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach allows the user to define the function graph once and then compute its forward or backward
// graphs multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This
// way the user can avoid the memory allocation overhead at runtime.
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated in the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_1s()
//  - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This makes it
// possible to store tensors that are not contiguous in memory, which is useful for operations such as transposition
// and permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       const int nx = 2;
//       const int ny = 3;
//
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, ny);
//
//       for (int y = 0; y < ny; y++) {
//           for (int x = 0; x < nx; x++) {
//               *(float *) ((char *) a->data + y*a->nb[1] + x*a->nb[0]) = x + y;
//           }
//       }
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d(), that can be used.
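//
// For example, the loop above can be rewritten with the 1d helpers (a sketch; the index is the
// flattened row-major element index):
//
//   {
//       for (int y = 0; y < ny; y++) {
//           for (int x = 0; x < nx; x++) {
//               ggml_set_f32_1d(a, y*nx + x, (float) (x + y));
//           }
//       }
//   }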
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//

#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

#ifdef GGML_MULTIPLATFORM
#    if defined(_WIN32)
#        define GGML_CALL
#    else
#        define GGML_CALL __attribute__((__ms_abi__))
#    endif
#else
#    define GGML_CALL
#endif

// TODO: support for clang
#ifdef __GNUC__
#    define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define GGML_DEPRECATED(func, hint) func
#endif

#ifndef __GNUC__
#    define GGML_ATTRIBUTE_FORMAT(...)
#elif defined(__MINGW32__)
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_QNT_VERSION        2    // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this

#define GGML_MAX_DIMS           4
#define GGML_MAX_PARAMS         2048
#define GGML_MAX_CONTEXTS       64
#define GGML_MAX_SRC            10
#ifndef GGML_MAX_NAME
#define GGML_MAX_NAME           64
#endif
#define GGML_MAX_OP_PARAMS      64
#define GGML_DEFAULT_N_THREADS  4
#define GGML_DEFAULT_GRAPH_SIZE 2048

#if UINTPTR_MAX == 0xFFFFFFFF
    #define GGML_MEM_ALIGN 4
#else
    #define GGML_MEM_ALIGN 16
#endif

#define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1

#define GGUF_MAGIC "GGUF"

#define GGUF_VERSION 3

#define GGUF_DEFAULT_ALIGNMENT 32

#define GGML_UNUSED(x) (void)(x)

#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
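// e.g. GGML_PAD(13, 16) == 16 and GGML_PAD(32, 16) == 32 (n must be a power of two)
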
#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fflush(stdout); \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            ggml_print_backtrace(); \
            abort(); \
        } \
    } while (0)

#ifndef NDEBUG
#define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached")
#elif defined(__GNUC__)
#define GGML_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#define GGML_UNREACHABLE() __assume(0)
#else
#define GGML_UNREACHABLE() ((void) 0)
#endif

// used to copy the number of elements and stride in bytes of tensors into local variables.
// main purpose is to reduce code duplication and improve readability.
//
// example:
//
//   GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
//   GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
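//
// which expands to local constants ne10..ne13 (copied from src1->ne[0..3]) and
// nb10..nb13 (copied from src1->nb[0..3]), respectively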
//
#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
    const type prefix##0 = (pointer)->array[0]; \
    GGML_UNUSED(prefix##0);
#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
    const type prefix##1 = (pointer)->array[1]; \
    GGML_UNUSED(prefix##1);
#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
    const type prefix##2 = (pointer)->array[2]; \
    GGML_UNUSED(prefix##2);
#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
    const type prefix##3 = (pointer)->array[3]; \
    GGML_UNUSED(prefix##3);

#define GGML_TENSOR_UNARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)

#define GGML_TENSOR_BINARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)

#ifdef __cplusplus
extern "C" {
#endif

#if defined(__ARM_NEON) && defined(__CUDACC__)
    typedef half ggml_fp16_t;
#elif defined(__ARM_NEON) && !defined(_MSC_VER)
    typedef __fp16 ggml_fp16_t;
#else
    typedef uint16_t ggml_fp16_t;
#endif

    // convert FP16 <-> FP32
    GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
    GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);
    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);
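
    // For example, a sketch of round-tripping a small buffer through FP16:
    //
    //   float       src[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    //   ggml_fp16_t h[4];
    //   float       back[4];
    //
    //   ggml_fp32_to_fp16_row(src, h,    4);
    //   ggml_fp16_to_fp32_row(h,   back, 4); // back[i] == src[i] (these values are exactly representable in FP16)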

    struct ggml_object;
    struct ggml_context;

    enum ggml_type {
        GGML_TYPE_F32  = 0,
        GGML_TYPE_F16  = 1,
        GGML_TYPE_Q4_0 = 2,
        GGML_TYPE_Q4_1 = 3,
        // GGML_TYPE_Q4_2 = 4, support has been removed
        // GGML_TYPE_Q4_3 = 5, support has been removed
        GGML_TYPE_Q5_0 = 6,
        GGML_TYPE_Q5_1 = 7,
        GGML_TYPE_Q8_0 = 8,
        GGML_TYPE_Q8_1 = 9,
        // k-quantizations
        GGML_TYPE_Q2_K = 10,
        GGML_TYPE_Q3_K = 11,
        GGML_TYPE_Q4_K = 12,
        GGML_TYPE_Q5_K = 13,
        GGML_TYPE_Q6_K = 14,
        GGML_TYPE_Q8_K = 15,
        GGML_TYPE_IQ2_XXS = 16,
        GGML_TYPE_IQ2_XS  = 17,
        GGML_TYPE_IQ3_XXS = 18,
        GGML_TYPE_IQ1_S   = 19,
        GGML_TYPE_I8,
        GGML_TYPE_I16,
        GGML_TYPE_I32,
        GGML_TYPE_COUNT,
    };

    // precision
    enum ggml_prec {
        GGML_PREC_DEFAULT,
        GGML_PREC_F32,
    };

    enum ggml_backend_type {
        GGML_BACKEND_CPU       = 0,
        GGML_BACKEND_GPU       = 10,
        GGML_BACKEND_GPU_SPLIT = 20,
    };

    // model file types
    enum ggml_ftype {
        GGML_FTYPE_UNKNOWN     = -1,
        GGML_FTYPE_ALL_F32     = 0,
        GGML_FTYPE_MOSTLY_F16  = 1,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_0 = 2,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1 = 3,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
        GGML_FTYPE_MOSTLY_Q8_0 = 7,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_0 = 8,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_1 = 9,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q3_K = 11, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_K = 12, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_K = 13, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q6_K = 14, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ2_XXS = 15, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ2_XS  = 16, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ3_XXS = 17, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ1_S   = 18, // except 1d tensors
    };

    // available tensor operations:
    enum ggml_op {
        GGML_OP_NONE = 0,

        GGML_OP_DUP,
        GGML_OP_ADD,
        GGML_OP_ADD1,
        GGML_OP_ACC,
        GGML_OP_SUB,
        GGML_OP_MUL,
        GGML_OP_DIV,
        GGML_OP_SQR,
        GGML_OP_SQRT,
        GGML_OP_LOG,
        GGML_OP_SUM,
        GGML_OP_SUM_ROWS,
        GGML_OP_MEAN,
        GGML_OP_ARGMAX,
        GGML_OP_REPEAT,
        GGML_OP_REPEAT_BACK,
        GGML_OP_CONCAT,
        GGML_OP_SILU_BACK,
        GGML_OP_NORM, // normalize
        GGML_OP_RMS_NORM,
        GGML_OP_RMS_NORM_BACK,
        GGML_OP_GROUP_NORM,

        GGML_OP_MUL_MAT,
        GGML_OP_MUL_MAT_ID,
        GGML_OP_OUT_PROD,

        GGML_OP_SCALE,
        GGML_OP_SET,
        GGML_OP_CPY,
        GGML_OP_CONT,
        GGML_OP_RESHAPE,
        GGML_OP_VIEW,
        GGML_OP_PERMUTE,
        GGML_OP_TRANSPOSE,
        GGML_OP_GET_ROWS,
        GGML_OP_GET_ROWS_BACK,
        GGML_OP_DIAG,
        GGML_OP_DIAG_MASK_INF,
        GGML_OP_DIAG_MASK_ZERO,
        GGML_OP_SOFT_MAX,
        GGML_OP_SOFT_MAX_BACK,
        GGML_OP_ROPE,
        GGML_OP_ROPE_BACK,
        GGML_OP_ALIBI,
        GGML_OP_CLAMP,
        GGML_OP_CONV_TRANSPOSE_1D,
        GGML_OP_IM2COL,
        GGML_OP_CONV_TRANSPOSE_2D,
        GGML_OP_POOL_1D,
        GGML_OP_POOL_2D,
        GGML_OP_UPSCALE, // nearest interpolate
        GGML_OP_PAD,
        GGML_OP_ARGSORT,
        GGML_OP_LEAKY_RELU,

        GGML_OP_FLASH_ATTN,
        GGML_OP_FLASH_FF,
        GGML_OP_FLASH_ATTN_BACK,
        GGML_OP_WIN_PART,
        GGML_OP_WIN_UNPART,
        GGML_OP_GET_REL_POS,
        GGML_OP_ADD_REL_POS,

        GGML_OP_UNARY,

        GGML_OP_MAP_UNARY,
        GGML_OP_MAP_BINARY,

        GGML_OP_MAP_CUSTOM1_F32,
        GGML_OP_MAP_CUSTOM2_F32,
        GGML_OP_MAP_CUSTOM3_F32,

        GGML_OP_MAP_CUSTOM1,
        GGML_OP_MAP_CUSTOM2,
        GGML_OP_MAP_CUSTOM3,

        GGML_OP_CROSS_ENTROPY_LOSS,
        GGML_OP_CROSS_ENTROPY_LOSS_BACK,

        GGML_OP_COUNT,
    };

    enum ggml_unary_op {
        GGML_UNARY_OP_ABS,
        GGML_UNARY_OP_SGN,
        GGML_UNARY_OP_NEG,
        GGML_UNARY_OP_STEP,
        GGML_UNARY_OP_TANH,
        GGML_UNARY_OP_ELU,
        GGML_UNARY_OP_RELU,
        GGML_UNARY_OP_GELU,
        GGML_UNARY_OP_GELU_QUICK,
        GGML_UNARY_OP_SILU,
        GGML_UNARY_OP_HARDSWISH,
        GGML_UNARY_OP_HARDSIGMOID,

        GGML_UNARY_OP_COUNT,
    };

    enum ggml_object_type {
        GGML_OBJECT_TENSOR,
        GGML_OBJECT_GRAPH,
        GGML_OBJECT_WORK_BUFFER
    };

    enum ggml_log_level {
        GGML_LOG_LEVEL_ERROR = 2,
        GGML_LOG_LEVEL_WARN  = 3,
        GGML_LOG_LEVEL_INFO  = 4,
        GGML_LOG_LEVEL_DEBUG = 5
    };

    enum ggml_tensor_flag {
        GGML_TENSOR_FLAG_INPUT  = 1,
        GGML_TENSOR_FLAG_OUTPUT = 2,
        GGML_TENSOR_FLAG_PARAM  = 4,
    };

    // ggml object
    struct ggml_object {
        size_t offs;
        size_t size;

        struct ggml_object * next;

        enum ggml_object_type type;

        char padding[4];
    };

    static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

    // n-dimensional tensor
    struct ggml_tensor {
        enum ggml_type         type;
        enum ggml_backend_type backend;

        struct ggml_backend_buffer * buffer;

        int64_t ne[GGML_MAX_DIMS]; // number of elements
        size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                                   // nb[0] = ggml_type_size(type)
                                   // nb[1] = nb[0]   * (ne[0] / ggml_blck_size(type)) + padding
                                   // nb[i] = nb[i-1] * ne[i-1]
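                                   // e.g. a contiguous 2x3 F32 tensor has ne = {2, 3, 1, 1}
                                   // and nb = {4, 8, 24, 24}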

        // compute data
        enum ggml_op op;

        // op params - allocated as int32_t for alignment
        int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];

        int32_t flags;

        struct ggml_tensor * grad;
        struct ggml_tensor * src[GGML_MAX_SRC];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;

        struct ggml_tensor * view_src;
        size_t               view_offs;

        void * data;

        char name[GGML_MAX_NAME];

        void * extra; // extra things e.g. for ggml-cuda.cu

        char padding[8];
    };

    static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);

    // Abort callback
    // If not NULL, called before ggml computation
    // If it returns true, the computation is aborted
    typedef bool (*ggml_abort_callback)(void * data);
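
    // For example, a sketch of a deadline-based callback (the callback name and its
    // "deadline_us" payload are illustrative, not part of the API):
    //
    //   static bool abort_after_deadline(void * data) {
    //       const int64_t deadline_us = *(const int64_t *) data;
    //       return ggml_time_us() > deadline_us; // true => abort the computation
    //   }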

    // the compute plan that needs to be prepared for ggml_graph_compute()
    // since https://github.com/ggerganov/ggml/issues/287
    struct ggml_cplan {
        size_t    work_size; // size of work buffer, calculated by `ggml_graph_plan()`
        uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()`

        int n_threads;

        // abort ggml_graph_compute when true
        ggml_abort_callback abort_callback;
        void *              abort_callback_data;
    };
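
    // typical usage is a plan/allocate/compute sequence (a sketch; it assumes a built
    // graph "gf" and the ggml_graph_plan()/ggml_graph_compute() functions declared
    // later in this header):
    //
    //   struct ggml_cplan plan = ggml_graph_plan(gf, /*n_threads =*/ 4);
    //   if (plan.work_size > 0) {
    //       plan.work_data = malloc(plan.work_size); // the caller owns the work buffer
    //   }
    //   ggml_graph_compute(gf, &plan);
    //   free(plan.work_data);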

    enum ggml_cgraph_eval_order {
        GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
        GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
        GGML_CGRAPH_EVAL_ORDER_COUNT
    };

    struct ggml_hash_set {
        size_t size;
        struct ggml_tensor ** keys;
    };

    // computation graph
    struct ggml_cgraph {
        int size;
        int n_nodes;
        int n_leafs;

        struct ggml_tensor ** nodes;
        struct ggml_tensor ** grads;
        struct ggml_tensor ** leafs;

        struct ggml_hash_set visited_hash_table;

        enum ggml_cgraph_eval_order order;

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;
    };

    // scratch buffer
    struct ggml_scratch {
        size_t offs;
        size_t size;
        void * data;
    };

    struct ggml_init_params {
        // memory pool
        size_t mem_size;   // bytes
        void * mem_buffer; // if NULL, memory will be allocated internally
        bool   no_alloc;   // don't allocate memory for the tensor data
    };

    // compute types

    // NOTE: the INIT or FINALIZE pass is not scheduled unless explicitly enabled.
    // This behavior was changed since https://github.com/ggerganov/llama.cpp/pull/1995.
    enum ggml_task_type {
        GGML_TASK_INIT = 0,
        GGML_TASK_COMPUTE,
        GGML_TASK_FINALIZE,
    };

    struct ggml_compute_params {
        enum ggml_task_type type;

        // ith = thread index, nth = number of threads
        int ith, nth;

        // work buffer for all threads
        size_t wsize;
        void * wdata;
    };

    // numa strategies
    enum ggml_numa_strategy {
        GGML_NUMA_STRATEGY_DISABLED   = 0,
        GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
        GGML_NUMA_STRATEGY_ISOLATE    = 2,
        GGML_NUMA_STRATEGY_NUMACTL    = 3,
        GGML_NUMA_STRATEGY_MIRROR     = 4,
        GGML_NUMA_STRATEGY_COUNT
    };

    // misc

    GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
    GGML_API int64_t ggml_time_ms(void);
    GGML_API int64_t ggml_time_us(void);
    GGML_API int64_t ggml_cycles(void);
    GGML_API int64_t ggml_cycles_per_ms(void);

    GGML_API void    ggml_print_backtrace(void);

    GGML_API void    ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
    GGML_API bool    ggml_is_numa(void); // true if init detected that system has >1 NUMA node

    GGML_API void    ggml_print_object (const struct ggml_object * obj);
    GGML_API void    ggml_print_objects(const struct ggml_context * ctx);

    GGML_API GGML_CALL int64_t ggml_nelements (const struct ggml_tensor * tensor);
    GGML_API GGML_CALL int64_t ggml_nrows     (const struct ggml_tensor * tensor);
    GGML_API GGML_CALL size_t  ggml_nbytes    (const struct ggml_tensor * tensor);
    GGML_API           size_t  ggml_nbytes_pad(const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN

    GGML_API GGML_CALL int    ggml_blck_size(enum ggml_type type);
    GGML_API GGML_CALL size_t ggml_type_size(enum ggml_type type);             // size in bytes for all elements in a block
    GGML_API GGML_CALL size_t ggml_row_size (enum ggml_type type, int64_t ne); // size in bytes for all elements in a row

    GGML_DEPRECATED(
    GGML_API double ggml_type_sizef(enum ggml_type type), // ggml_type_size()/ggml_blck_size() as float
    "use ggml_row_size() instead");

    GGML_API GGML_CALL const char * ggml_type_name(enum ggml_type type);
    GGML_API GGML_CALL const char * ggml_op_name  (enum ggml_op   op);
    GGML_API           const char * ggml_op_symbol(enum ggml_op   op);

    GGML_API           const char * ggml_unary_op_name(enum ggml_unary_op op);
    GGML_API GGML_CALL const char * ggml_op_desc(const struct ggml_tensor * t); // unary or op name

    GGML_API GGML_CALL size_t ggml_element_size(const struct ggml_tensor * tensor);

    GGML_API GGML_CALL bool ggml_is_quantized(enum ggml_type type);

    // TODO: temporary until model loading of ggml examples is refactored
    GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

    GGML_API GGML_CALL bool ggml_is_transposed(const struct ggml_tensor * tensor);
    GGML_API GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor);
    GGML_API GGML_CALL bool ggml_is_permuted  (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_scalar    (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_vector    (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_matrix    (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_3d        (const struct ggml_tensor * tensor);
    GGML_API           int  ggml_n_dims       (const struct ggml_tensor * tensor); // returns 1 for scalars

    GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1);

    // use this to compute the memory overhead of a tensor
    GGML_API size_t ggml_tensor_overhead(void);

    // main

    GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
    GGML_API void                  ggml_free(struct ggml_context * ctx);

    GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);

    GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
    GGML_API bool   ggml_get_no_alloc(struct ggml_context * ctx);
    GGML_API void   ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

    GGML_API void * ggml_get_mem_buffer     (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_mem_size       (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);

    GGML_API struct ggml_tensor * ggml_new_tensor(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int    n_dims,
            const int64_t *ne);

    GGML_API struct ggml_tensor * ggml_new_tensor_1d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0);

    GGML_API struct ggml_tensor * ggml_new_tensor_2d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0,
            int64_t ne1);

    GGML_API struct ggml_tensor * ggml_new_tensor_3d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0,
            int64_t ne1,
            int64_t ne2);

    GGML_API struct ggml_tensor * ggml_new_tensor_4d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0,
            int64_t ne1,
            int64_t ne2,
            int64_t ne3);

    GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
    GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

    GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
    GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src);

    // Context tensor enumeration and lookup
    GGML_API struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx);
    GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);

    GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

    // Converts a flat index into coordinates
    GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
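    // e.g. for ne = {2, 3, 1, 1}, the flat index i = 5 unravels to (i0, i1, i2, i3) = (1, 2, 0, 0)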

    GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
    GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);

    GGML_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
    GGML_API float   ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);

    GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
    GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

    GGML_API GGML_CALL enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);

    GGML_API const char *         ggml_get_name   (const struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_name   (      struct ggml_tensor * tensor, const char * name);
    GGML_ATTRIBUTE_FORMAT(2, 3)
    GGML_API struct ggml_tensor * ggml_format_name(      struct ggml_tensor * tensor, const char * fmt, ...);

    //
    // operations on tensors with backpropagation
    //

    GGML_API struct ggml_tensor * ggml_dup(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_dup_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_add(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_cast(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            enum   ggml_type      type);

    GGML_API struct ggml_tensor * ggml_add1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // dst = a
    // view(dst, nb1, nb2, nb3, offset) += b
    // return dst
    GGML_API struct ggml_tensor * ggml_acc(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_acc_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_sub(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sub_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sqr(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqr_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return scalar
    GGML_API struct ggml_tensor * ggml_sum(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // sums along rows, with input shape [a,b,c,d] return shape [1,b,c,d]
    GGML_API struct ggml_tensor * ggml_sum_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // mean along rows
    GGML_API struct ggml_tensor * ggml_mean(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // argmax along rows
    GGML_API struct ggml_tensor * ggml_argmax(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // if a is the same shape as b, and a is not a parameter, return a
    // otherwise, return a new tensor: repeat(a) to fit in b
    GGML_API struct ggml_tensor * ggml_repeat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // sums repetitions in a into shape of b
    GGML_API struct ggml_tensor * ggml_repeat_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // concat a and b on dim 2
    // used in stable-diffusion
    GGML_API struct ggml_tensor * ggml_concat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_abs(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_abs_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_leaky_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a, float negative_slope, bool inplace);

    GGML_API struct ggml_tensor * ggml_relu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_silu_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // hardswish(x) = x * relu6(x + 3) / 6
    GGML_API struct ggml_tensor * ggml_hardswish(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // hardsigmoid(x) = relu6(x + 3) / 6
    GGML_API struct ggml_tensor * ggml_hardsigmoid(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // normalize along rows
    GGML_API struct ggml_tensor * ggml_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_rms_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    // group normalize along ne0*ne1*n_groups
    // used in stable-diffusion
    // TODO: eps is hardcoded to 1e-6 for now
    GGML_API struct ggml_tensor * ggml_group_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_groups);

    GGML_API struct ggml_tensor * ggml_group_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_groups);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_rms_norm_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            float                 eps);

    // A: k columns, n rows => [ne03, ne02, n, k]
    // B: k columns, m rows (i.e. we transpose it internally) => [ne03 * x, ne02 * y, m, k]
    // result is n columns, m rows => [ne03 * x, ne02 * y, m, n]
    GGML_API struct ggml_tensor * ggml_mul_mat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
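
    // For example, a sketch of the shape convention with 2-D tensors (shared dim k = 64):
    //
    //   struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 32); // ne = {64, 32}
    //   struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64,  8); // ne = {64,  8}
    //   struct ggml_tensor * r = ggml_mul_mat(ctx, a, b);                        // ne = {32,  8}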

    // change the precision of a matrix multiplication
    // set to GGML_PREC_F32 for higher precision (useful for phi-2)
    GGML_API void ggml_mul_mat_set_prec(
            struct ggml_tensor * a,
            enum   ggml_prec     prec);

    // indirect matrix multiplication
    // ggml_mul_mat_id(ctx, as, ids, id, b) ~= ggml_mul_mat(as[ids[id]], b)
    GGML_API struct ggml_tensor * ggml_mul_mat_id(
            struct ggml_context * ctx,
            struct ggml_tensor  * const as[],
            int                   n_as,
            struct ggml_tensor  * ids,
            int                   id,
            struct ggml_tensor  * b);

    // A: m columns, n rows,
    // B: p columns, n rows,
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_out_prod(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    //
    // operations on tensors without backpropagation
    //

    GGML_API struct ggml_tensor * ggml_scale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 s);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_scale_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 s);

    // b -> view(a,offset,nb1,nb2,nb3), return modified a
    GGML_API struct ggml_tensor * ggml_set(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    // b -> view(a,offset,nb1,nb2,nb3), return view(a)
    GGML_API struct ggml_tensor * ggml_set_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    // b -> view(a,offset,nb1), return modified a
    GGML_API struct ggml_tensor * ggml_set_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // b -> view(a,offset,nb1), return view(a)
    GGML_API struct ggml_tensor * ggml_set_2d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // a -> b, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cast(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum   ggml_type      type);

    // make contiguous
    GGML_API struct ggml_tensor * ggml_cont(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // make contiguous, with new shape
    GGML_API struct ggml_tensor * ggml_cont_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_cont_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    GGML_API struct ggml_tensor * ggml_cont_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_cont_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    // return view(a), b specifies the new shape
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_reshape_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_reshape_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    // offset in bytes
    GGML_API struct ggml_tensor * ggml_view_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            size_t                offset);
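
    // e.g. a view of row 1 of a 2-D tensor "t" (a sketch; the offset is in bytes):
    //
    //   struct ggml_tensor * row1 = ggml_view_1d(ctx, t, t->ne[0], 1*t->nb[1]);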
  1087. GGML_API struct ggml_tensor * ggml_view_2d(
  1088. struct ggml_context * ctx,
  1089. struct ggml_tensor * a,
  1090. int64_t ne0,
  1091. int64_t ne1,
  1092. size_t nb1, // row stride in bytes
  1093. size_t offset);
  1094. GGML_API struct ggml_tensor * ggml_view_3d(
  1095. struct ggml_context * ctx,
  1096. struct ggml_tensor * a,
  1097. int64_t ne0,
  1098. int64_t ne1,
  1099. int64_t ne2,
  1100. size_t nb1, // row stride in bytes
  1101. size_t nb2, // slice stride in bytes
  1102. size_t offset);
  1103. GGML_API struct ggml_tensor * ggml_view_4d(
  1104. struct ggml_context * ctx,
  1105. struct ggml_tensor * a,
  1106. int64_t ne0,
  1107. int64_t ne1,
  1108. int64_t ne2,
  1109. int64_t ne3,
  1110. size_t nb1, // row stride in bytes
  1111. size_t nb2, // slice stride in bytes
  1112. size_t nb3,
  1113. size_t offset);
  1114. GGML_API struct ggml_tensor * ggml_permute(
  1115. struct ggml_context * ctx,
  1116. struct ggml_tensor * a,
  1117. int axis0,
  1118. int axis1,
  1119. int axis2,
  1120. int axis3);
  1121. // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
  1122. GGML_API struct ggml_tensor * ggml_transpose(
  1123. struct ggml_context * ctx,
  1124. struct ggml_tensor * a);
  1125. // supports 3D: a->ne[2] == b->ne[1]
  1126. GGML_API struct ggml_tensor * ggml_get_rows(
  1127. struct ggml_context * ctx,
  1128. struct ggml_tensor * a,
  1129. struct ggml_tensor * b);
  1130. GGML_API struct ggml_tensor * ggml_get_rows_back(
  1131. struct ggml_context * ctx,
  1132. struct ggml_tensor * a,
  1133. struct ggml_tensor * b,
  1134. struct ggml_tensor * c);
  1135. GGML_API struct ggml_tensor * ggml_diag(
  1136. struct ggml_context * ctx,
  1137. struct ggml_tensor * a);
  1138. // set elements above the diagonal to -INF
  1139. GGML_API struct ggml_tensor * ggml_diag_mask_inf(
  1140. struct ggml_context * ctx,
  1141. struct ggml_tensor * a,
  1142. int n_past);
  1143. // in-place, returns view(a)
  1144. GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
  1145. struct ggml_context * ctx,
  1146. struct ggml_tensor * a,
  1147. int n_past);
  1148. // set elements above the diagonal to 0
  1149. GGML_API struct ggml_tensor * ggml_diag_mask_zero(
  1150. struct ggml_context * ctx,
  1151. struct ggml_tensor * a,
  1152. int n_past);
  1153. // in-place, returns view(a)
  1154. GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
  1155. struct ggml_context * ctx,
  1156. struct ggml_tensor * a,
  1157. int n_past);
  1158. GGML_API struct ggml_tensor * ggml_soft_max(
  1159. struct ggml_context * ctx,
  1160. struct ggml_tensor * a);
  1161. // in-place, returns view(a)
  1162. GGML_API struct ggml_tensor * ggml_soft_max_inplace(
  1163. struct ggml_context * ctx,
  1164. struct ggml_tensor * a);
  1165. // fused soft_max(a*scale + mask + pos[i]*(ALiBi slope))
  1166. // mask is optional
  1167. // pos is required when max_bias > 0.0f
  1168. // max_bias = 0.0f for no ALiBi
  1169. GGML_API struct ggml_tensor * ggml_soft_max_ext(
  1170. struct ggml_context * ctx,
  1171. struct ggml_tensor * a,
  1172. struct ggml_tensor * mask,
  1173. struct ggml_tensor * pos,
  1174. float scale,
  1175. float max_bias);
  1176. GGML_API struct ggml_tensor * ggml_soft_max_back(
  1177. struct ggml_context * ctx,
  1178. struct ggml_tensor * a,
  1179. struct ggml_tensor * b);
  1180. // in-place, returns view(a)
  1181. GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
  1182. struct ggml_context * ctx,
  1183. struct ggml_tensor * a,
  1184. struct ggml_tensor * b);
  1185. // rotary position embedding
  1186. // if mode & 1 == 1, skip n_past elements (DEPRECATED)
  1187. // if mode & 2 == 1, GPT-NeoX style
  1188. // if mode & 4 == 1, ChatGLM style
  1189. //
  1190. // b is an int32 vector with size a->ne[2], it contains the positions
  1191. GGML_API struct ggml_tensor * ggml_rope(
  1192. struct ggml_context * ctx,
  1193. struct ggml_tensor * a,
  1194. struct ggml_tensor * b,
  1195. int n_dims,
  1196. int mode,
  1197. int n_ctx);
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // custom RoPE
    GGML_API struct ggml_tensor * ggml_rope_custom(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            int                   n_orig_ctx,
            float                 freq_base,
            float                 freq_scale,
            float                 ext_factor,
            float                 attn_factor,
            float                 beta_fast,
            float                 beta_slow);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            int                   n_orig_ctx,
            float                 freq_base,
            float                 freq_scale,
            float                 ext_factor,
            float                 attn_factor,
            float                 beta_fast,
            float                 beta_slow);

    // compute correction dims for YaRN RoPE scaling
    GGML_CALL void ggml_rope_yarn_corr_dims(
        int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]);

    // xPos RoPE, in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_xpos_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            float                 base,
            bool                  down);

    // rotary position embedding backward, i.e. compute dx from dy
    // a - dy
    GGML_API struct ggml_tensor * ggml_rope_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            int                   n_orig_ctx,
            float                 freq_base,
            float                 freq_scale,
            float                 ext_factor,
            float                 attn_factor,
            float                 beta_fast,
            float                 beta_slow,
            float                 xpos_base,
            bool                  xpos_down);

    // alibi position embedding
    // in-place, returns view(a)
    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_alibi(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_head,
            float                 bias_max),
        "use ggml_soft_max_ext instead (will be removed in Mar 2024)");

    // clamp
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_clamp(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 min,
            float                 max);

    GGML_API struct ggml_tensor * ggml_im2col(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1,
            bool                  is_2D,
            enum ggml_type        dst_type);
    GGML_API struct ggml_tensor * ggml_conv_depthwise_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1);

    GGML_API struct ggml_tensor * ggml_conv_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,  // stride
            int                   p0,  // padding
            int                   d0); // dilation

    // conv_1d with padding = half
    // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
    GGML_API struct ggml_tensor * ggml_conv_1d_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s,
            int                   d);

    GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   p0,
            int                   d0);

    GGML_API struct ggml_tensor * ggml_conv_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1);

    // kernel size is a->ne[0] x a->ne[1]
    // stride is equal to kernel size
    // padding is zero
    // example:
    // a:     16   16    3  768
    // b:   1024 1024    3    1
    // res:   64   64  768    1
    // used in sam
    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // kernel size is a->ne[0] x a->ne[1]
    // stride is 1
    // padding is half
    // example:
    // a:      3    3  256  256
    // b:     64   64  256    1
    // res:   64   64  256    1
    // used in sam
    GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   stride);
    enum ggml_op_pool {
        GGML_OP_POOL_MAX,
        GGML_OP_POOL_AVG,
        GGML_OP_POOL_COUNT,
    };

    GGML_API struct ggml_tensor * ggml_pool_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0, // kernel size
            int                   s0, // stride
            int                   p0); // padding

    // the result will have 2*p0 padding for the first dimension
    // and 2*p1 padding for the second dimension
    GGML_API struct ggml_tensor * ggml_pool_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0,
            int                   k1,
            int                   s0,
            int                   s1,
            float                 p0,
            float                 p1);

    // nearest-neighbor interpolation
    // used in stable-diffusion
    GGML_API struct ggml_tensor * ggml_upscale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   scale_factor);

    // pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0]
    GGML_API struct ggml_tensor * ggml_pad(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   p0,
            int                   p1,
            int                   p2,
            int                   p3);

    // sort rows
    enum ggml_sort_order {
        GGML_SORT_ASC,
        GGML_SORT_DESC,
    };

    GGML_API struct ggml_tensor * ggml_argsort(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_sort_order  order);

    // top k elements per row
    GGML_API struct ggml_tensor * ggml_top_k(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   k);
    GGML_API struct ggml_tensor * ggml_flash_attn(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_attn_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * d,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_ff(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b0,
            struct ggml_tensor  * b1,
            struct ggml_tensor  * c0,
            struct ggml_tensor  * c1);

    // partition into non-overlapping windows with padding if needed
    // example:
    // a:   768   64   64    1
    // w:    14
    // res: 768   14   14   25
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_part(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w);

    // reverse of ggml_win_part
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_unpart(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w0,
            int                   h0,
            int                   w);
    GGML_API struct ggml_tensor * ggml_unary(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    GGML_API struct ggml_tensor * ggml_unary_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    // used in sam
    GGML_API struct ggml_tensor * ggml_get_rel_pos(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   qh,
            int                   kh);

    // used in sam
    GGML_API struct ggml_tensor * ggml_add_rel_pos(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * pw,
            struct ggml_tensor  * ph);

    GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * pw,
            struct ggml_tensor  * ph);
    // custom operators

    typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
    typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

    typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            ggml_custom1_op_f32_t   fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            ggml_custom1_op_f32_t   fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            ggml_custom2_op_f32_t   fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            ggml_custom2_op_f32_t   fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            struct ggml_tensor    * c,
            ggml_custom3_op_f32_t   fun),
        "use ggml_map_custom3 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
            struct ggml_context   * ctx,
            struct ggml_tensor    * a,
            struct ggml_tensor    * b,
            struct ggml_tensor    * c,
            ggml_custom3_op_f32_t   fun),
        "use ggml_map_custom3_inplace instead");
    // custom operators v2

    typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, int ith, int nth, void * userdata);
    typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
    typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);

    #define GGML_N_TASKS_MAX -1

    GGML_API struct ggml_tensor * ggml_map_custom1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom2(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom3(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_t     fun,
            int                   n_tasks,
            void                * userdata);
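    //
    // usage sketch (illustrative): an element-wise negation as a custom op;
    // the even row split over ith/nth is one common way to divide the work,
    // ggml_nelements is declared earlier in this header, and the sketch
    // assumes a contiguous f32 tensor:
    //
    //   static void neg_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
    //                      int ith, int nth, void * userdata) {
    //       const int64_t n  = ggml_nelements(dst);
    //       const int64_t dr = (n + nth - 1)/nth;          // elements per thread
    //       const int64_t i0 = dr*ith;                     // this thread's range
    //       const int64_t i1 = i0 + dr < n ? i0 + dr : n;
    //       for (int64_t i = i0; i < i1; ++i) {
    //           ((float *) dst->data)[i] = -((const float *) a->data)[i];
    //       }
    //   }
    //
    //   struct ggml_tensor * out = ggml_map_custom1(ctx, a, neg_op, GGML_N_TASKS_MAX, NULL);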
    // loss function

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    //
    // automatic differentiation
    //

    GGML_API void ggml_set_param(
            struct ggml_context * ctx,
            struct ggml_tensor  * tensor);

    GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
    GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep);

    // graph allocation in a context
    GGML_API struct ggml_cgraph * ggml_new_graph        (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
    GGML_API struct ggml_cgraph * ggml_new_graph_custom (struct ggml_context * ctx, size_t size, bool grads);
    GGML_API struct ggml_cgraph * ggml_graph_dup        (struct ggml_context * ctx, struct ggml_cgraph * cgraph);
    GGML_API struct ggml_cgraph   ggml_graph_view       (struct ggml_cgraph * cgraph, int i0, int i1);
    GGML_API void                 ggml_graph_cpy        (struct ggml_cgraph * src, struct ggml_cgraph * dst);
    GGML_API void                 ggml_graph_reset      (struct ggml_cgraph * cgraph); // zero grads
    GGML_API void                 ggml_graph_clear      (struct ggml_cgraph * cgraph);

    GGML_API size_t ggml_graph_overhead(void);
    GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads);

    // ggml_graph_plan() has to be called before ggml_graph_compute()
    // when plan.work_size > 0, caller must allocate memory for plan.work_data
    GGML_API struct ggml_cplan ggml_graph_plan   (const struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
    GGML_API int               ggml_graph_compute(      struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
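    //
    // usage sketch (illustrative): the plan/compute pattern described above;
    // `graph` and the thread count are placeholders:
    //
    //   struct ggml_cplan plan = ggml_graph_plan(graph, /*n_threads =*/ 4);
    //   if (plan.work_size > 0) {
    //       plan.work_data = malloc(plan.work_size); // caller-owned work buffer
    //   }
    //   ggml_graph_compute(graph, &plan);
    //   free(plan.work_data);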
    // same as ggml_graph_compute() but the work data is allocated as a part of the context
    // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
    GGML_API void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);

    GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);

    GGML_API void                 ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
    GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);

    // print info and performance information for the graph
    GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

    // dump the graph into a file using the dot format
    GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);

    // build gradient checkpointing backward graph gb for gf using provided checkpoints
    // gb_tmp will contain the original backward graph with rewritten backward process nodes,
    // but without the second forward pass nodes
    GGML_API void ggml_build_backward_gradient_checkpointing(
            struct ggml_context   * ctx,
            struct ggml_cgraph    * gf,
            struct ggml_cgraph    * gb,
            struct ggml_cgraph    * gb_tmp,
            struct ggml_tensor  * * checkpoints,
            int                     n_checkpoints);
    //
    // optimization
    //

    // optimization methods
    enum ggml_opt_type {
        GGML_OPT_ADAM,
        GGML_OPT_LBFGS,
    };

    // linesearch methods
    enum ggml_linesearch {
        GGML_LINESEARCH_DEFAULT = 1,

        GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
        GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
        GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
    };

    // optimization return values
    enum ggml_opt_result {
        GGML_OPT_OK = 0,
        GGML_OPT_DID_NOT_CONVERGE,
        GGML_OPT_NO_CONTEXT,
        GGML_OPT_INVALID_WOLFE,
        GGML_OPT_FAIL,
        GGML_OPT_CANCEL,

        GGML_LINESEARCH_FAIL = -128,
        GGML_LINESEARCH_MINIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_ITERATIONS,
        GGML_LINESEARCH_INVALID_PARAMETERS,
    };

    typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
    typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);

    // optimization parameters
    //
    //   see ggml.c (ggml_opt_default_params) for default values
    //
    struct ggml_opt_params {
        enum ggml_opt_type type;

        size_t graph_size;

        int n_threads;

        // delta-based convergence test
        //
        //   if past == 0 - disabled
        //   if past > 0:
        //     stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
        //
        int past;
        float delta;

        // maximum number of iterations without improvement
        //
        //   if 0 - disabled
        //   if > 0:
        //     assume convergence if no cost improvement in this number of iterations
        //
        int max_no_improvement;

        bool print_forward_graph;
        bool print_backward_graph;

        int n_gradient_accumulation;

        // ADAM parameters
        struct {
            int n_iter;

            float sched; // schedule multiplier (fixed, decay or warmup)
            float decay; // weight decay for AdamW, use 0.0f to disable
            int   decay_min_ndim; // minimum number of tensor dimensions to apply weight decay
            float alpha; // learning rate
            float beta1;
            float beta2;
            float eps;   // epsilon for numerical stability
            float eps_f; // epsilon for convergence test
            float eps_g; // epsilon for convergence test
            float gclip; // gradient clipping
        } adam;

        // LBFGS parameters
        struct {
            int m; // number of corrections to approximate the inv. Hessian
            int n_iter;
            int max_linesearch;

            float eps;  // convergence tolerance
            float ftol; // line search tolerance
            float wolfe;
            float min_step;
            float max_step;

            enum ggml_linesearch linesearch;
        } lbfgs;
    };
    struct ggml_opt_context {
        struct ggml_context * ctx;
        struct ggml_opt_params params;

        int iter;
        int64_t nx; // number of parameter elements

        bool just_initialized;

        float loss_before;
        float loss_after;

        struct {
            struct ggml_tensor * g;  // current gradient
            struct ggml_tensor * m;  // first moment
            struct ggml_tensor * v;  // second moment
            struct ggml_tensor * pf; // past function values
            float fx_best;
            float fx_prev;
            int n_no_improvement;
        } adam;

        struct {
            struct ggml_tensor * x;    // current parameters
            struct ggml_tensor * xp;   // previous parameters
            struct ggml_tensor * g;    // current gradient
            struct ggml_tensor * gp;   // previous gradient
            struct ggml_tensor * d;    // search direction
            struct ggml_tensor * pf;   // past function values
            struct ggml_tensor * lmal; // the L-BFGS memory alpha
            struct ggml_tensor * lmys; // the L-BFGS memory ys
            struct ggml_tensor * lms;  // the L-BFGS memory s
            struct ggml_tensor * lmy;  // the L-BFGS memory y
            float fx_best;
            float step;
            int j;
            int k;
            int end;
            int n_no_improvement;
        } lbfgs;
    };

    GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

    // optimize the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt(
            struct ggml_context * ctx,
            struct ggml_opt_params params,
            struct ggml_tensor * f);
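    //
    // usage sketch (illustrative): one-shot minimization of a scalar loss
    // tensor `f` with default ADAM parameters; the iteration count is a
    // placeholder, and the trainable tensors must have been marked with
    // ggml_set_param before f was built:
    //
    //   struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_ADAM);
    //   params.adam.n_iter = 100;
    //   enum ggml_opt_result res = ggml_opt(ctx, params, f);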
    // initialize optimizer context
    GGML_API void ggml_opt_init(
            struct ggml_context     * ctx,
            struct ggml_opt_context * opt,
            struct ggml_opt_params    params,
            int64_t                   nx);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume_g(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f,
            struct ggml_cgraph * gf,
            struct ggml_cgraph * gb,
            ggml_opt_callback callback,
            void * callback_data);

    //
    // tensor flags
    //
    GGML_API void ggml_set_input(struct ggml_tensor * tensor);
    GGML_API void ggml_set_output(struct ggml_tensor * tensor);

    //
    // quantization
    //

    // - ggml_quantize_init can be called multiple times with the same type
    //   it will only initialize the quantization tables for the first call or after ggml_quantize_free
    //   automatically called by ggml_quantize_chunk for convenience
    //
    // - ggml_quantize_free will free any memory allocated by ggml_quantize_init
    //   call this at the end of the program to avoid memory leaks
    //
    // note: these are thread-safe
    //
    GGML_API void ggml_quantize_init(enum ggml_type type);
    GGML_API void ggml_quantize_free(void);

    // TODO: these would probably get removed in favor of the more general ggml_quantize_chunk
    GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);

    GGML_API size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);

    // some quantization types cannot be used without an importance matrix
    GGML_API bool ggml_quantize_requires_imatrix(enum ggml_type type);

    // calls ggml_quantize_init internally (i.e. can allocate memory)
    GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst,
            int start, int nrows, int n_per_row, int64_t * hist, const float * imatrix);
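    //
    // usage sketch (illustrative): quantize nrows rows of f32 data to Q4_0;
    // the 16-bucket histogram size and the use of ggml_row_size (declared
    // earlier in this header) to size the destination buffer are assumptions
    // of this sketch, not requirements stated here:
    //
    //   int64_t hist[16] = {0};
    //   void * dst = malloc(ggml_row_size(GGML_TYPE_Q4_0, n_per_row)*nrows);
    //   size_t n_bytes = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, 0, nrows, n_per_row, hist, NULL);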
    //
    // gguf
    //

    enum gguf_type {
        GGUF_TYPE_UINT8   = 0,
        GGUF_TYPE_INT8    = 1,
        GGUF_TYPE_UINT16  = 2,
        GGUF_TYPE_INT16   = 3,
        GGUF_TYPE_UINT32  = 4,
        GGUF_TYPE_INT32   = 5,
        GGUF_TYPE_FLOAT32 = 6,
        GGUF_TYPE_BOOL    = 7,
        GGUF_TYPE_STRING  = 8,
        GGUF_TYPE_ARRAY   = 9,
        GGUF_TYPE_UINT64  = 10,
        GGUF_TYPE_INT64   = 11,
        GGUF_TYPE_FLOAT64 = 12,
        GGUF_TYPE_COUNT,       // marks the end of the enum
    };

    struct gguf_context;

    struct gguf_init_params {
        bool no_alloc;

        // if not NULL, create a ggml_context and allocate the tensor data in it
        struct ggml_context ** ctx;
    };

    GGML_API struct gguf_context * gguf_init_empty(void);
    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
    //GGML_API struct gguf_context * gguf_init_from_buffer(..);

    GGML_API void gguf_free(struct gguf_context * ctx);

    GGML_API const char * gguf_type_name(enum gguf_type type);

    GGML_API int    gguf_get_version    (const struct gguf_context * ctx);
    GGML_API size_t gguf_get_alignment  (const struct gguf_context * ctx);
    GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx);
    GGML_API void * gguf_get_data       (const struct gguf_context * ctx);

    GGML_API int          gguf_get_n_kv(const struct gguf_context * ctx);
    GGML_API int          gguf_find_key(const struct gguf_context * ctx, const char * key);
    GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id);

    GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id);
    GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id);

    // will abort if the wrong type is used for the key
    GGML_API uint8_t      gguf_get_val_u8  (const struct gguf_context * ctx, int key_id);
    GGML_API int8_t       gguf_get_val_i8  (const struct gguf_context * ctx, int key_id);
    GGML_API uint16_t     gguf_get_val_u16 (const struct gguf_context * ctx, int key_id);
    GGML_API int16_t      gguf_get_val_i16 (const struct gguf_context * ctx, int key_id);
    GGML_API uint32_t     gguf_get_val_u32 (const struct gguf_context * ctx, int key_id);
    GGML_API int32_t      gguf_get_val_i32 (const struct gguf_context * ctx, int key_id);
    GGML_API float        gguf_get_val_f32 (const struct gguf_context * ctx, int key_id);
    GGML_API uint64_t     gguf_get_val_u64 (const struct gguf_context * ctx, int key_id);
    GGML_API int64_t      gguf_get_val_i64 (const struct gguf_context * ctx, int key_id);
    GGML_API double       gguf_get_val_f64 (const struct gguf_context * ctx, int key_id);
    GGML_API bool         gguf_get_val_bool(const struct gguf_context * ctx, int key_id);
    GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id);
    GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id);
    GGML_API int          gguf_get_arr_n   (const struct gguf_context * ctx, int key_id);
    GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id);
    GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i);

    GGML_API int            gguf_get_n_tensors    (const struct gguf_context * ctx);
    GGML_API int            gguf_find_tensor      (const struct gguf_context * ctx, const char * name);
    GGML_API size_t         gguf_get_tensor_offset(const struct gguf_context * ctx, int i);
    GGML_API char *         gguf_get_tensor_name  (const struct gguf_context * ctx, int i);
    GGML_API enum ggml_type gguf_get_tensor_type  (const struct gguf_context * ctx, int i);
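    //
    // usage sketch (illustrative): read a string KV pair from a file;
    // "model.gguf" and the key name are placeholder values:
    //
    //   struct gguf_init_params params = { /*.no_alloc =*/ false, /*.ctx =*/ NULL };
    //   struct gguf_context * gctx = gguf_init_from_file("model.gguf", params);
    //   const int kid = gguf_find_key(gctx, "general.architecture");
    //   if (kid >= 0 && gguf_get_kv_type(gctx, kid) == GGUF_TYPE_STRING) {
    //       printf("arch: %s\n", gguf_get_val_str(gctx, kid));
    //   }
    //   gguf_free(gctx);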
    // overrides existing values or adds a new one
    GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t  val);
    GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t   val);
    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t  val);
    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t  val);
    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float    val);
    GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val);
    GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t  val);
    GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double   val);
    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool     val);
    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);

    // set or add KV pairs from another context
    GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);

    // manage tensor info
    GGML_API void gguf_add_tensor     (struct gguf_context * ctx, const struct ggml_tensor * tensor);
    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);

    // writing gguf files can be done in 2 ways:
    //
    // - write the entire gguf_context to a binary file in a single pass:
    //
    //   gguf_write_to_file(ctx, fname, false);
    //
    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
    //
    //   FILE * f = fopen(fname, "wb");
    //   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
    //   fwrite(..., f); // write the tensor data
    //   void * data = malloc(gguf_get_meta_size(ctx));
    //   gguf_get_meta_data(ctx, data);
    //   fseek(f, 0, SEEK_SET);
    //   fwrite(data, 1, gguf_get_meta_size(ctx), f);
    //   free(data);
    //   fclose(f);
    //

    // write the entire context to a binary file
    GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);

    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
    GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
    GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);
    //
    // system info
    //

    GGML_API int ggml_cpu_has_avx        (void);
    GGML_API int ggml_cpu_has_avx_vnni   (void);
    GGML_API int ggml_cpu_has_avx2       (void);
    GGML_API int ggml_cpu_has_avx512     (void);
    GGML_API int ggml_cpu_has_avx512_vbmi(void);
    GGML_API int ggml_cpu_has_avx512_vnni(void);
    GGML_API int ggml_cpu_has_fma        (void);
    GGML_API int ggml_cpu_has_neon       (void);
    GGML_API int ggml_cpu_has_arm_fma    (void);
    GGML_API int ggml_cpu_has_metal      (void);
    GGML_API int ggml_cpu_has_f16c       (void);
    GGML_API int ggml_cpu_has_fp16_va    (void);
    GGML_API int ggml_cpu_has_wasm_simd  (void);
    GGML_API int ggml_cpu_has_blas       (void);
    GGML_API int ggml_cpu_has_cublas     (void);
    GGML_API int ggml_cpu_has_clblast    (void);
    GGML_API int ggml_cpu_has_vulkan     (void);
    GGML_API int ggml_cpu_has_kompute    (void);
    GGML_API int ggml_cpu_has_gpublas    (void);
    GGML_API int ggml_cpu_has_sse3       (void);
    GGML_API int ggml_cpu_has_ssse3      (void);
    GGML_API int ggml_cpu_has_sycl       (void);
    GGML_API int ggml_cpu_has_vsx        (void);
    GGML_API int ggml_cpu_has_matmul_int8(void);

    //
    // Internal types and functions exposed for tests and benchmarks
    //

#ifdef __cplusplus
// restrict not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif
    typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int k);
    typedef void (*ggml_vec_dot_t)   (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx,
                                      const void * GGML_RESTRICT y, size_t by, int nrc);

    typedef struct {
        const char      * type_name;
        int               blck_size;
        size_t            type_size;
        bool              is_quantized;
        ggml_to_float_t   to_float;
        ggml_from_float_t from_float;
        ggml_from_float_t from_float_reference;
        ggml_vec_dot_t    vec_dot;
        enum ggml_type    vec_dot_type;
        int64_t           nrows; // number of rows to process simultaneously
    } ggml_type_traits_t;

    GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
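    //
    // usage sketch (illustrative): inspect the traits of a quantized type:
    //
    //   ggml_type_traits_t traits = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
    //   printf("%s: block size = %d, type size = %zu bytes\n",
    //          traits.type_name, traits.blck_size, traits.type_size);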
#ifdef __cplusplus
}
#endif