#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you would like specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph * gf = ggml_new_graph(ctx);
//       ggml_build_forward_expand(gf, f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute_with_ctx(ctx, gf, n_threads);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough buffer
// and, after defining the computation graph, call the ggml_used_mem() function to find out how much memory was
// actually needed.
//
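// For example, a minimal sketch of the two-pass sizing approach (illustrative,
// using only functions declared in this file):
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 128*1024*1024, // generous upper bound for the first pass
//           .mem_buffer = NULL,
//       };
//       struct ggml_context * ctx = ggml_init(params);
//
//       // ... define tensors and build the graph ...
//
//       const size_t needed = ggml_used_mem(ctx); // memory actually consumed
//       ggml_free(ctx);
//
//       // second pass: re-initialize with mem_size == needed
//   }
//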
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach makes it possible to define the function graph once and then compute its forward or backward graphs
// multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way
// the user can avoid the memory allocation overhead at runtime.
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first-class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated in the following operations:
//
//   - ggml_permute()
//   - ggml_conv_1d_1s()
//   - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
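// To make this concrete with the running example f(x) = a*x^2 + b: given the
// output adjoint df = 1, the backward pass produces the input adjoints
//
//   dx = 2*a*x,   da = x^2,   db = 1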
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This makes it
// possible to store tensors that are not contiguous in memory, which is useful for operations such as transposition and
// permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       const int nx = 2;
//       const int ny = 3;
//
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, ny);
//
//       for (int y = 0; y < ny; y++) {
//           for (int x = 0; x < nx; x++) {
//               *(float *) ((char *) a->data + y*a->nb[1] + x*a->nb[0]) = x + y;
//           }
//       }
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d(), that can be used.
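//
// For a contiguous tensor, the loop above can also be written with the 1d helpers
// (using the flat index i = y*nx + x):
//
//   for (int i = 0; i < nx*ny; i++) {
//       ggml_set_f32_1d(a, i, (float) (i%nx + i/nx));
//   }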
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

// TODO: support for clang
#ifdef __GNUC__
#    define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define GGML_DEPRECATED(func, hint) func
#endif

#ifndef __GNUC__
#    define GGML_ATTRIBUTE_FORMAT(...)
#elif defined(__MINGW32__)
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_QNT_VERSION        2    // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this

#define GGML_MAX_DIMS           4
#define GGML_MAX_PARAMS         2048
#define GGML_MAX_CONTEXTS       64
#define GGML_MAX_SRC            10
#ifndef GGML_MAX_NAME
#define GGML_MAX_NAME           64
#endif
#define GGML_MAX_OP_PARAMS      64
#define GGML_DEFAULT_N_THREADS  4
#define GGML_DEFAULT_GRAPH_SIZE 2048

#if UINTPTR_MAX == 0xFFFFFFFF
    #define GGML_MEM_ALIGN 4
#else
    #define GGML_MEM_ALIGN 16
#endif

#define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1

#define GGUF_MAGIC "GGUF"

#define GGUF_VERSION 3

#define GGUF_DEFAULT_ALIGNMENT 32

#define GGML_UNUSED(x) (void)(x)

#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
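// e.g. GGML_PAD(13, 8) == 16 - rounds x up to the next multiple of n (n must be a power of 2)
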
#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fflush(stdout); \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            ggml_print_backtrace(); \
            abort(); \
        } \
    } while (0)

#ifndef NDEBUG
#define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached")
#elif defined(__GNUC__)
#define GGML_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#define GGML_UNREACHABLE() __assume(0)
#else
#define GGML_UNREACHABLE() ((void) 0)
#endif
// used to copy the number of elements and stride in bytes of tensors into local variables.
// main purpose is to reduce code duplication and improve readability.
//
// example:
//
//    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
//    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
//
#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
    const type prefix##0 = (pointer)->array[0]; \
    GGML_UNUSED(prefix##0);
#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_1    (type, prefix, pointer, array) \
    const type prefix##1 = (pointer)->array[1]; \
    GGML_UNUSED(prefix##1);
#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_2    (type, prefix, pointer, array) \
    const type prefix##2 = (pointer)->array[2]; \
    GGML_UNUSED(prefix##2);
#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_3  (type, prefix, pointer, array) \
    const type prefix##3 = (pointer)->array[3]; \
    GGML_UNUSED(prefix##3);

#define GGML_TENSOR_UNARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)

#define GGML_TENSOR_BINARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)
#ifdef  __cplusplus
extern "C" {
#endif

#if defined(__ARM_NEON) && defined(__CUDACC__)
    typedef half ggml_fp16_t;
#elif defined(__ARM_NEON) && !defined(_MSC_VER)
    typedef __fp16 ggml_fp16_t;
#else
    typedef uint16_t ggml_fp16_t;
#endif

// convert FP16 <-> FP32
GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);
GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);
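
// e.g. a round-trip through FP16 (illustrative values):
//
//   ggml_fp16_t h = ggml_fp32_to_fp16(3.14159f);
//   float       f = ggml_fp16_to_fp32(h); // ~3.1406 - FP16 keeps roughly 3 decimal digits
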
struct ggml_object;
struct ggml_context;

enum ggml_type {
    GGML_TYPE_F32  = 0,
    GGML_TYPE_F16  = 1,
    GGML_TYPE_Q4_0 = 2,
    GGML_TYPE_Q4_1 = 3,
    // GGML_TYPE_Q4_2 = 4, support has been removed
    // GGML_TYPE_Q4_3 = 5, support has been removed
    GGML_TYPE_Q5_0 = 6,
    GGML_TYPE_Q5_1 = 7,
    GGML_TYPE_Q8_0 = 8,
    GGML_TYPE_Q8_1 = 9,
    // k-quantizations
    GGML_TYPE_Q2_K = 10,
    GGML_TYPE_Q3_K = 11,
    GGML_TYPE_Q4_K = 12,
    GGML_TYPE_Q5_K = 13,
    GGML_TYPE_Q6_K = 14,
    GGML_TYPE_Q8_K = 15,
    GGML_TYPE_IQ2_XXS = 16,
    GGML_TYPE_IQ2_XS  = 17,
    GGML_TYPE_I8,
    GGML_TYPE_I16,
    GGML_TYPE_I32,
    GGML_TYPE_COUNT,
};

// precision
enum ggml_prec {
    GGML_PREC_DEFAULT,
    GGML_PREC_F32,
};
enum ggml_backend_type {
    GGML_BACKEND_CPU = 0,
    GGML_BACKEND_GPU = 10,
    GGML_BACKEND_GPU_SPLIT = 20,
};

// model file types
enum ggml_ftype {
    GGML_FTYPE_UNKNOWN        = -1,
    GGML_FTYPE_ALL_F32        = 0,
    GGML_FTYPE_MOSTLY_F16     = 1,  // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_0    = 2,  // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_1    = 3,  // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
    GGML_FTYPE_MOSTLY_Q8_0    = 7,  // except 1d tensors
    GGML_FTYPE_MOSTLY_Q5_0    = 8,  // except 1d tensors
    GGML_FTYPE_MOSTLY_Q5_1    = 9,  // except 1d tensors
    GGML_FTYPE_MOSTLY_Q2_K    = 10, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q3_K    = 11, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_K    = 12, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q5_K    = 13, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q6_K    = 14, // except 1d tensors
    GGML_FTYPE_MOSTLY_IQ2_XXS = 15, // except 1d tensors
    GGML_FTYPE_MOSTLY_IQ2_XS  = 16, // except 1d tensors
};
// available tensor operations:
enum ggml_op {
    GGML_OP_NONE = 0,

    GGML_OP_DUP,
    GGML_OP_ADD,
    GGML_OP_ADD1,
    GGML_OP_ACC,
    GGML_OP_SUB,
    GGML_OP_MUL,
    GGML_OP_DIV,
    GGML_OP_SQR,
    GGML_OP_SQRT,
    GGML_OP_LOG,
    GGML_OP_SUM,
    GGML_OP_SUM_ROWS,
    GGML_OP_MEAN,
    GGML_OP_ARGMAX,
    GGML_OP_REPEAT,
    GGML_OP_REPEAT_BACK,
    GGML_OP_CONCAT,
    GGML_OP_SILU_BACK,
    GGML_OP_NORM, // normalize
    GGML_OP_RMS_NORM,
    GGML_OP_RMS_NORM_BACK,
    GGML_OP_GROUP_NORM,

    GGML_OP_MUL_MAT,
    GGML_OP_MUL_MAT_ID,
    GGML_OP_OUT_PROD,

    GGML_OP_SCALE,
    GGML_OP_SET,
    GGML_OP_CPY,
    GGML_OP_CONT,
    GGML_OP_RESHAPE,
    GGML_OP_VIEW,
    GGML_OP_PERMUTE,
    GGML_OP_TRANSPOSE,
    GGML_OP_GET_ROWS,
    GGML_OP_GET_ROWS_BACK,
    GGML_OP_DIAG,
    GGML_OP_DIAG_MASK_INF,
    GGML_OP_DIAG_MASK_ZERO,
    GGML_OP_SOFT_MAX,
    GGML_OP_SOFT_MAX_BACK,
    GGML_OP_ROPE,
    GGML_OP_ROPE_BACK,
    GGML_OP_ALIBI,
    GGML_OP_CLAMP,
    GGML_OP_CONV_TRANSPOSE_1D,
    GGML_OP_IM2COL,
    GGML_OP_CONV_TRANSPOSE_2D,
    GGML_OP_POOL_1D,
    GGML_OP_POOL_2D,
    GGML_OP_UPSCALE, // nearest interpolate
    GGML_OP_PAD,
    GGML_OP_ARGSORT,
    GGML_OP_LEAKY_RELU,

    GGML_OP_FLASH_ATTN,
    GGML_OP_FLASH_FF,
    GGML_OP_FLASH_ATTN_BACK,
    GGML_OP_WIN_PART,
    GGML_OP_WIN_UNPART,
    GGML_OP_GET_REL_POS,
    GGML_OP_ADD_REL_POS,

    GGML_OP_UNARY,

    GGML_OP_MAP_UNARY,
    GGML_OP_MAP_BINARY,

    GGML_OP_MAP_CUSTOM1_F32,
    GGML_OP_MAP_CUSTOM2_F32,
    GGML_OP_MAP_CUSTOM3_F32,

    GGML_OP_MAP_CUSTOM1,
    GGML_OP_MAP_CUSTOM2,
    GGML_OP_MAP_CUSTOM3,

    GGML_OP_CROSS_ENTROPY_LOSS,
    GGML_OP_CROSS_ENTROPY_LOSS_BACK,

    GGML_OP_COUNT,
};

enum ggml_unary_op {
    GGML_UNARY_OP_ABS,
    GGML_UNARY_OP_SGN,
    GGML_UNARY_OP_NEG,
    GGML_UNARY_OP_STEP,
    GGML_UNARY_OP_TANH,
    GGML_UNARY_OP_ELU,
    GGML_UNARY_OP_RELU,
    GGML_UNARY_OP_GELU,
    GGML_UNARY_OP_GELU_QUICK,
    GGML_UNARY_OP_SILU,

    GGML_UNARY_OP_COUNT,
};
enum ggml_object_type {
    GGML_OBJECT_TENSOR,
    GGML_OBJECT_GRAPH,
    GGML_OBJECT_WORK_BUFFER
};

enum ggml_log_level {
    GGML_LOG_LEVEL_ERROR = 2,
    GGML_LOG_LEVEL_WARN  = 3,
    GGML_LOG_LEVEL_INFO  = 4,
    GGML_LOG_LEVEL_DEBUG = 5
};

// ggml object
struct ggml_object {
    size_t offs;
    size_t size;

    struct ggml_object * next;

    enum ggml_object_type type;

    char padding[4];
};

static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);
// n-dimensional tensor
struct ggml_tensor {
    enum ggml_type         type;
    enum ggml_backend_type backend;

    struct ggml_backend_buffer * buffer;

    int64_t ne[GGML_MAX_DIMS]; // number of elements
    size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                               // nb[0] = ggml_type_size(type)
                               // nb[1] = nb[0]   * (ne[0] / ggml_blck_size(type)) + padding
                               // nb[i] = nb[i-1] * ne[i-1]
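                               //
                               // e.g. a contiguous F32 tensor with ne = [2, 3, 1, 1]
                               // (block size 1, element size 4) has nb = [4, 8, 24, 24]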

    // compute data
    enum ggml_op op;

    // op params - allocated as int32_t for alignment
    int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];

    bool is_param;

    struct ggml_tensor * grad;
    struct ggml_tensor * src[GGML_MAX_SRC];

    // performance
    int     perf_runs;
    int64_t perf_cycles;
    int64_t perf_time_us;

    struct ggml_tensor * view_src;
    size_t               view_offs;

    void * data;

    char name[GGML_MAX_NAME];

    void * extra; // extra things e.g. for ggml-cuda.cu

    char padding[8];
};

static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
// the compute plan that needs to be prepared for ggml_graph_compute()
// since https://github.com/ggerganov/ggml/issues/287
struct ggml_cplan {
    size_t    work_size; // size of work buffer, calculated by `ggml_graph_plan()`
    uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()`

    int n_threads;

    // abort ggml_graph_compute when true
    bool (*abort_callback)(void * data);
    void * abort_callback_data;
};
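
// typical usage - a minimal sketch, assuming ggml_graph_plan() and
// ggml_graph_compute() as declared later in this file:
//
//   struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
//   if (plan.work_size > 0) {
//       plan.work_data = malloc(plan.work_size); // the caller owns the work buffer
//   }
//   ggml_graph_compute(graph, &plan);
//   free(plan.work_data);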

enum ggml_cgraph_eval_order {
    GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
    GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
    GGML_CGRAPH_EVAL_ORDER_COUNT
};

struct ggml_hash_set {
    size_t size;
    struct ggml_tensor ** keys;
};

// computation graph
struct ggml_cgraph {
    int size;
    int n_nodes;
    int n_leafs;

    struct ggml_tensor ** nodes;
    struct ggml_tensor ** grads;
    struct ggml_tensor ** leafs;

    struct ggml_hash_set visited_hash_table;

    enum ggml_cgraph_eval_order order;

    // performance
    int     perf_runs;
    int64_t perf_cycles;
    int64_t perf_time_us;
};

// scratch buffer
struct ggml_scratch {
    size_t offs;
    size_t size;
    void * data;
};

struct ggml_init_params {
    // memory pool
    size_t mem_size;   // bytes
    void * mem_buffer; // if NULL, memory will be allocated internally
    bool   no_alloc;   // don't allocate memory for the tensor data
};
// compute types

// NOTE: the INIT or FINALIZE pass is not scheduled unless explicitly enabled.
// This behavior was changed since https://github.com/ggerganov/llama.cpp/pull/1995.
enum ggml_task_type {
    GGML_TASK_INIT = 0,
    GGML_TASK_COMPUTE,
    GGML_TASK_FINALIZE,
};

struct ggml_compute_params {
    enum ggml_task_type type;

    // ith = thread index, nth = number of threads
    int ith, nth;

    // work buffer for all threads
    size_t wsize;
    void * wdata;
};
// misc

GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
GGML_API int64_t ggml_time_ms(void);
GGML_API int64_t ggml_time_us(void);
GGML_API int64_t ggml_cycles(void);
GGML_API int64_t ggml_cycles_per_ms(void);

GGML_API void    ggml_print_backtrace(void);

GGML_API void    ggml_numa_init(void); // call once for better performance on NUMA systems
GGML_API bool    ggml_is_numa(void);   // true if init detected that system has >1 NUMA node

GGML_API void    ggml_print_object (const struct ggml_object * obj);
GGML_API void    ggml_print_objects(const struct ggml_context * ctx);

GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor);
GGML_API int64_t ggml_nrows     (const struct ggml_tensor * tensor);
GGML_API size_t  ggml_nbytes    (const struct ggml_tensor * tensor);
GGML_API size_t  ggml_nbytes_pad(const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN

GGML_API int    ggml_blck_size(enum ggml_type type);
GGML_API size_t ggml_type_size(enum ggml_type type);             // size in bytes for all elements in a block
GGML_API size_t ggml_row_size (enum ggml_type type, int64_t ne); // size in bytes for all elements in a row

GGML_DEPRECATED(
GGML_API double ggml_type_sizef(enum ggml_type type), // ggml_type_size()/ggml_blck_size() as float
"use ggml_row_size() instead");

GGML_API const char * ggml_type_name(enum ggml_type type);
GGML_API const char * ggml_op_name  (enum ggml_op   op);
GGML_API const char * ggml_op_symbol(enum ggml_op   op);

GGML_API const char * ggml_unary_op_name(enum ggml_unary_op op);
GGML_API const char * ggml_op_desc(const struct ggml_tensor * t); // unary or op name

GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor);

GGML_API bool   ggml_is_quantized(enum ggml_type type);

// TODO: temporary until model loading of ggml examples is refactored
GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor);
GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);
GGML_API bool ggml_is_permuted  (const struct ggml_tensor * tensor);
GGML_API bool ggml_is_scalar    (const struct ggml_tensor * tensor);
GGML_API bool ggml_is_vector    (const struct ggml_tensor * tensor);
GGML_API bool ggml_is_matrix    (const struct ggml_tensor * tensor);
GGML_API bool ggml_is_3d        (const struct ggml_tensor * tensor);
GGML_API int  ggml_n_dims       (const struct ggml_tensor * tensor); // returns 1 for scalars

GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1);

// use this to compute the memory overhead of a tensor
GGML_API size_t ggml_tensor_overhead(void);
// main

GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
GGML_API void                  ggml_free(struct ggml_context * ctx);

GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);

GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
GGML_API bool   ggml_get_no_alloc(struct ggml_context * ctx);
GGML_API void   ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

GGML_API void * ggml_get_mem_buffer     (const struct ggml_context * ctx);
GGML_API size_t ggml_get_mem_size       (const struct ggml_context * ctx);
GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);

GGML_API struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    n_dims,
        const int64_t *ne);

GGML_API struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0);

GGML_API struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0,
        int64_t ne1);

GGML_API struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2);

GGML_API struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3);

GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src);

// Context tensor enumeration and lookup
GGML_API struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx);
GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);

GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

// Converts a flat index into coordinates
GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);

GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
GGML_API void    ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);

GGML_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
GGML_API void  ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

GGML_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
GGML_API void  ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);

GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);

GGML_API const char *         ggml_get_name   (const struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_set_name   (      struct ggml_tensor * tensor, const char * name);
GGML_ATTRIBUTE_FORMAT(2, 3)
GGML_API struct ggml_tensor * ggml_format_name(      struct ggml_tensor * tensor, const char * fmt, ...);
//
// operations on tensors with backpropagation
//

GGML_API struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_dup_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_add_cast(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        enum   ggml_type      type);

GGML_API struct ggml_tensor * ggml_add1(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_add1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// dst = a
// view(dst, nb1, nb2, nb3, offset) += b
// return dst
GGML_API struct ggml_tensor * ggml_acc(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                nb2,
        size_t                nb3,
        size_t                offset);

GGML_API struct ggml_tensor * ggml_acc_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                nb2,
        size_t                nb3,
        size_t                offset);

GGML_API struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_sub_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_mul_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_div_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_sqr_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_sqrt_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_log(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_log_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// return scalar
GGML_API struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// sums along rows, with input shape [a,b,c,d] return shape [1,b,c,d]
GGML_API struct ggml_tensor * ggml_sum_rows(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// mean along rows
GGML_API struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// argmax along rows
GGML_API struct ggml_tensor * ggml_argmax(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// if a is the same shape as b, and a is not a parameter, return a
// otherwise, return a new tensor: repeat(a) to fit in b
GGML_API struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);
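
// example: a->ne = [2, 1, 1, 1], b->ne = [2, 4, 1, 1]
//          => result->ne = [2, 4, 1, 1], i.e. the row of a repeated 4 times
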
// sums repetitions in a into shape of b
GGML_API struct ggml_tensor * ggml_repeat_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// concat a and b on dim 2
// used in stable-diffusion
GGML_API struct ggml_tensor * ggml_concat(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_abs_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_sgn_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_neg_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_step_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_tanh(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_tanh_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_elu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_elu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_leaky_relu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a, float negative_slope, bool inplace);

GGML_API struct ggml_tensor * ggml_relu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_gelu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_gelu_quick(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_silu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// a - x
// b - dy
GGML_API struct ggml_tensor * ggml_silu_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// normalize along rows
GGML_API struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 eps);

GGML_API struct ggml_tensor * ggml_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 eps);

GGML_API struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 eps);

GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 eps);

// group normalize along ne0*ne1*n_groups
// used in stable-diffusion
// TODO: eps is hardcoded to 1e-6 for now
GGML_API struct ggml_tensor * ggml_group_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_groups);

GGML_API struct ggml_tensor * ggml_group_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_groups);

// a - x
// b - dy
GGML_API struct ggml_tensor * ggml_rms_norm_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        float                 eps);

// A: k columns, n rows => [ne03, ne02, n, k]
// B: k columns, m rows (i.e. we transpose it internally) => [ne03 * x, ne02 * y, m, k]
// result is n columns, m rows => [ne03 * x, ne02 * y, m, n]
GGML_API struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);
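
// example (2D case): a->ne = [64, 32, 1, 1], i.e. 32 rows of length 64
//                    b->ne = [64, 16, 1, 1], i.e. 16 rows of length 64
//                    => result->ne = [32, 16, 1, 1]
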
// change the precision of a matrix multiplication
// set to GGML_PREC_F32 for higher precision (useful for phi-2)
GGML_API void ggml_mul_mat_set_prec(
        struct ggml_tensor * a,
        enum   ggml_prec     prec);

// indirect matrix multiplication
//  ggml_mul_mat_id(ctx, as, ids, id, b) ~= ggml_mul_mat(as[ids[id]], b)
GGML_API struct ggml_tensor * ggml_mul_mat_id(
        struct ggml_context * ctx,
        struct ggml_tensor  * const as[],
        int                   n_as,
        struct ggml_tensor  * ids,
        int                   id,
        struct ggml_tensor  * b);

// A: m columns, n rows,
// B: p columns, n rows,
// result is m columns, p rows
GGML_API struct ggml_tensor * ggml_out_prod(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);
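
// example: a->ne = [4, 8, 1, 1], b->ne = [3, 8, 1, 1]
//          => result->ne = [4, 3, 1, 1]
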
//
// operations on tensors without backpropagation
//

GGML_API struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 s);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 s);

// b -> view(a,offset,nb1,nb2,nb3), return modified a
GGML_API struct ggml_tensor * ggml_set(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                nb2,
        size_t                nb3,
        size_t                offset);

// b -> view(a,offset,nb1,nb2,nb3), return view(a)
GGML_API struct ggml_tensor * ggml_set_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                nb2,
        size_t                nb3,
        size_t                offset);

GGML_API struct ggml_tensor * ggml_set_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                offset);

GGML_API struct ggml_tensor * ggml_set_1d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                offset);

// b -> view(a,offset,nb1,nb2,nb3), return modified a
GGML_API struct ggml_tensor * ggml_set_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                offset);

// b -> view(a,offset,nb1,nb2,nb3), return view(a)
GGML_API struct ggml_tensor * ggml_set_2d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                offset);
// a -> b, return view(b)
GGML_API struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_cast(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum   ggml_type      type);

// make contiguous
GGML_API struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// make contiguous, with new shape
GGML_API struct ggml_tensor * ggml_cont_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0);

GGML_API struct ggml_tensor * ggml_cont_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1);

GGML_API struct ggml_tensor * ggml_cont_3d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2);

GGML_API struct ggml_tensor * ggml_cont_4d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        int64_t               ne3);

// return view(a), b specifies the new shape
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// return view(a)
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0);

GGML_API struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1);

// return view(a)
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2);

GGML_API struct ggml_tensor * ggml_reshape_4d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        int64_t               ne3);
// offset in bytes
GGML_API struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        size_t                offset);

GGML_API struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1,
        size_t                nb1, // row stride in bytes
        size_t                offset);

GGML_API struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        size_t                nb1, // row   stride in bytes
        size_t                nb2, // slice stride in bytes
        size_t                offset);

GGML_API struct ggml_tensor * ggml_view_4d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        int64_t               ne3,
        size_t                nb1, // row   stride in bytes
        size_t                nb2, // slice stride in bytes
        size_t                nb3,
        size_t                offset);

GGML_API struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   axis0,
        int                   axis1,
        int                   axis2,
        int                   axis3);
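
// example: ggml_permute(ctx, a, 2, 0, 1, 3) sends dim 0 of a to dim 2 of the
//          result, dim 1 to dim 0 and dim 2 to dim 1:
//          a->ne = [10, 20, 30, 1] => result->ne = [20, 30, 10, 1]
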
// alias for ggml_permute(ctx, a, 1, 0, 2, 3)
GGML_API struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// supports 3D: a->ne[2] == b->ne[1]
GGML_API struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_get_rows_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        struct ggml_tensor  * c);

GGML_API struct ggml_tensor * ggml_diag(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// set elements above the diagonal to -INF
GGML_API struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_past);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_past);

// set elements above the diagonal to 0
GGML_API struct ggml_tensor * ggml_diag_mask_zero(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_past);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_past);

GGML_API struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_soft_max_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// fused soft_max(a*scale + mask)
// mask is optional
GGML_API struct ggml_tensor * ggml_soft_max_ext(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * mask,
        float                 scale);

GGML_API struct ggml_tensor * ggml_soft_max_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// rotary position embedding
// if (mode & 1) - skip n_past elements (DEPRECATED)
// if (mode & 2) - GPT-NeoX style
// if (mode & 4) - ChatGLM style
//
// b is an int32 vector with size a->ne[2], it contains the positions
GGML_API struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx);

// custom RoPE
GGML_API struct ggml_tensor * ggml_rope_custom(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow);

// compute correction dims for YaRN RoPE scaling
void ggml_rope_yarn_corr_dims(
    int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]);

// xPos RoPE, in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_rope_xpos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        float                 base,
        bool                  down);

// rotary position embedding backward, i.e. compute dx from dy
// a - dy
GGML_API struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow,
        float                 xpos_base,
        bool                  xpos_down);

// alibi position embedding
// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_alibi(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_past,
        int                   n_head,
        float                 bias_max);

// clamp
// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 min,
        float                 max);
GGML_API struct ggml_tensor * ggml_im2col(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1,
        bool                  is_2D);

GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,  // stride
        int                   p0,  // padding
        int                   d0); // dilation
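
// a is the convolution kernel, b is the data (cf. the ggml_conv_2d_sk_p0 comment
// below); the output length follows the standard convolution arithmetic:
//
//   out = (b->ne[0] + 2*p0 - d0*(a->ne[0] - 1) - 1) / s0 + 1
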
// conv_1d with padding = half
// alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
GGML_API struct ggml_tensor * ggml_conv_1d_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s,
        int                   d);

GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   p0,
        int                   d0);

GGML_API struct ggml_tensor * ggml_conv_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1);

// kernel size is a->ne[0] x a->ne[1]
// stride is equal to kernel size
// padding is zero
// example:
// a:     16   16    3  768
// b:   1024 1024    3    1
// res:   64   64  768    1
// used in sam
GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// kernel size is a->ne[0] x a->ne[1]
// stride is 1
// padding is half
// example:
// a:     3    3   256  256
// b:    64   64   256    1
// res:  64   64   256    1
// used in sam
GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   stride);
  1307. enum ggml_op_pool {
  1308. GGML_OP_POOL_MAX,
  1309. GGML_OP_POOL_AVG,
  1310. GGML_OP_POOL_COUNT,
  1311. };
  1312. GGML_API struct ggml_tensor * ggml_pool_1d(
  1313. struct ggml_context * ctx,
  1314. struct ggml_tensor * a,
  1315. enum ggml_op_pool op,
  1316. int k0, // kernel size
  1317. int s0, // stride
  1318. int p0); // padding
  1319. // the result will have 2*p0 padding for the first dimension
  1320. // and 2*p1 padding for the second dimension
  1321. GGML_API struct ggml_tensor * ggml_pool_2d(
  1322. struct ggml_context * ctx,
  1323. struct ggml_tensor * a,
  1324. enum ggml_op_pool op,
  1325. int k0,
  1326. int k1,
  1327. int s0,
  1328. int s1,
  1329. float p0,
  1330. float p1);

// nearest-neighbor interpolation, upscale by an integer factor
// used in stable-diffusion
GGML_API struct ggml_tensor * ggml_upscale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int scale_factor);

// pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0]
GGML_API struct ggml_tensor * ggml_pad(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int p0,
        int p1,
        int p2,
        int p3);

// sort rows
enum ggml_sort_order {
    GGML_SORT_ASC,
    GGML_SORT_DESC,
};

GGML_API struct ggml_tensor * ggml_argsort(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_sort_order order);

// top k elements per row
GGML_API struct ggml_tensor * ggml_top_k(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int k);

GGML_API struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        bool masked);
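
// example (editorial sketch): fused scaled-dot-product attention over q/k/v,
// with `masked = true` applying a causal mask; the expected q/k/v layouts are
// not documented in this header, so they are intentionally omitted here
//
//   struct ggml_tensor * out = ggml_flash_attn(ctx, q, k, v, /*masked*/ true);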

GGML_API struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        struct ggml_tensor * d,
        bool masked);

GGML_API struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b0,
        struct ggml_tensor * b1,
        struct ggml_tensor * c0,
        struct ggml_tensor * c1);

// partition into non-overlapping windows with padding if needed
// example:
// a:   768   64   64    1
// w:    14
// res: 768   14   14   25
// used in sam
GGML_API struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w);

// reverse of ggml_win_part
// used in sam
GGML_API struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w0,
        int h0,
        int w);

GGML_API struct ggml_tensor * ggml_unary(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op);

GGML_API struct ggml_tensor * ggml_unary_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op);

// used in sam
GGML_API struct ggml_tensor * ggml_get_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int qh,
        int kh);

// used in sam
GGML_API struct ggml_tensor * ggml_add_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph);

GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph);

// custom operators

typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_unary_op_f32_t fun),
    "use ggml_map_custom1 instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_unary_op_f32_t fun),
    "use ggml_map_custom1_inplace instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_binary_op_f32_t fun),
    "use ggml_map_custom2 instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_binary_op_f32_t fun),
    "use ggml_map_custom2_inplace instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_custom1_op_f32_t fun),
    "use ggml_map_custom1 instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_custom1_op_f32_t fun),
    "use ggml_map_custom1_inplace instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_custom2_op_f32_t fun),
    "use ggml_map_custom2 instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_custom2_op_f32_t fun),
    "use ggml_map_custom2_inplace instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        ggml_custom3_op_f32_t fun),
    "use ggml_map_custom3 instead");

GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        ggml_custom3_op_f32_t fun),
    "use ggml_map_custom3_inplace instead");

// custom operators v2

typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, int ith, int nth, void * userdata);
typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);

#define GGML_N_TASKS_MAX -1 // pass as n_tasks to run the op on all available threads

GGML_API struct ggml_tensor * ggml_map_custom1(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_custom1_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_custom1_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom2(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom3(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata);
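
// example (editorial sketch): a custom element-wise op on contiguous F32
// tensors; the work is split across threads via ith/nth. `scale_op` and the
// captured scale are hypothetical names, not part of the API.
//
//   static void scale_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                        int ith, int nth, void * userdata) {
//       const float s = *(const float *) userdata;
//       const int64_t n = ggml_nelements(dst);
//       for (int64_t i = ith; i < n; i += nth) { // interleaved split across threads
//           ((float *) dst->data)[i] = ((const float *) a->data)[i] * s;
//       }
//   }
//
//   static float scale = 2.0f;
//   struct ggml_tensor * y = ggml_map_custom1(ctx, x, scale_op, GGML_N_TASKS_MAX, &scale);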

// loss function

GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b);

GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c);

//
// automatic differentiation
//

GGML_API void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor * tensor);

GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep);

// graph allocation in a context
GGML_API struct ggml_cgraph * ggml_new_graph        (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
GGML_API struct ggml_cgraph * ggml_new_graph_custom (struct ggml_context * ctx, size_t size, bool grads);
GGML_API struct ggml_cgraph * ggml_graph_dup        (struct ggml_context * ctx, struct ggml_cgraph * cgraph);
GGML_API struct ggml_cgraph   ggml_graph_view       (struct ggml_cgraph * cgraph, int i0, int i1);
GGML_API void                 ggml_graph_cpy        (struct ggml_cgraph * src, struct ggml_cgraph * dst);
GGML_API void                 ggml_graph_reset      (struct ggml_cgraph * cgraph); // zero grads
GGML_API void                 ggml_graph_clear      (struct ggml_cgraph * cgraph);

GGML_API size_t ggml_graph_overhead(void);
GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads);

// ggml_graph_plan() has to be called before ggml_graph_compute()
// when plan.work_size > 0, the caller must allocate memory for plan.work_data
GGML_API struct ggml_cplan ggml_graph_plan   (const struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
GGML_API int               ggml_graph_compute(      struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);

// same as ggml_graph_compute() but the work data is allocated as a part of the context
// note: the drawback of this API is that you must ensure beforehand that the context has enough memory for the work data
GGML_API void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);
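
// example (editorial sketch): building and computing a graph on the CPU;
// assumes `ctx` was created with enough memory and `result` is the output tensor
//
//   struct ggml_cgraph * gf = ggml_new_graph(ctx);
//   ggml_build_forward_expand(gf, result);
//   ggml_graph_compute_with_ctx(ctx, gf, /*n_threads*/ 4);
//
// or, managing the work buffer manually:
//
//   struct ggml_cplan plan = ggml_graph_plan(gf, /*n_threads*/ 4);
//   if (plan.work_size > 0) {
//       plan.work_data = malloc(plan.work_size);
//   }
//   ggml_graph_compute(gf, &plan);
//   free(plan.work_data);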

GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);

GGML_API void                 ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);

// print info and performance information for the graph
GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

// dump the graph into a file using the dot format
GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);

// build a gradient-checkpointing backward graph gb for gf using the provided checkpoints
// gb_tmp will contain the original backward graph with rewritten backward-process nodes,
// but without the second forward pass nodes
GGML_API void ggml_build_backward_gradient_checkpointing(
        struct ggml_context * ctx,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        struct ggml_cgraph * gb_tmp,
        struct ggml_tensor ** checkpoints,
        int n_checkpoints);

//
// optimization
//

// optimization methods
enum ggml_opt_type {
    GGML_OPT_ADAM,
    GGML_OPT_LBFGS,
};

// linesearch methods
enum ggml_linesearch {
    GGML_LINESEARCH_DEFAULT = 1,

    GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
    GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
    GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
};

// optimization return values
enum ggml_opt_result {
    GGML_OPT_OK = 0,
    GGML_OPT_DID_NOT_CONVERGE,
    GGML_OPT_NO_CONTEXT,
    GGML_OPT_INVALID_WOLFE,
    GGML_OPT_FAIL,
    GGML_OPT_CANCEL,

    GGML_LINESEARCH_FAIL = -128,
    GGML_LINESEARCH_MINIMUM_STEP,
    GGML_LINESEARCH_MAXIMUM_STEP,
    GGML_LINESEARCH_MAXIMUM_ITERATIONS,
    GGML_LINESEARCH_INVALID_PARAMETERS,
};

typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);

// optimization parameters
//
// see ggml.c (ggml_opt_default_params) for default values
//
struct ggml_opt_params {
    enum ggml_opt_type type;

    size_t graph_size;

    int n_threads;

    // delta-based convergence test
    //
    // if past == 0 - disabled
    // if past > 0:
    //   stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
    //
    int past;
    float delta;

    // maximum number of iterations without improvement
    //
    // if 0 - disabled
    // if > 0:
    //   assume convergence if no cost improvement in this number of iterations
    //
    int max_no_improvement;

    bool print_forward_graph;
    bool print_backward_graph;

    int n_gradient_accumulation;

    // ADAM parameters
    struct {
        int n_iter;

        float sched; // schedule multiplier (fixed, decay or warmup)
        float decay; // weight decay for AdamW, use 0.0f to disable
        int   decay_min_ndim; // minimum number of tensor dimensions to apply weight decay
        float alpha; // learning rate
        float beta1;
        float beta2;
        float eps;   // epsilon for numerical stability
        float eps_f; // epsilon for convergence test
        float eps_g; // epsilon for convergence test
        float gclip; // gradient clipping
    } adam;

    // LBFGS parameters
    struct {
        int m; // number of corrections to approximate the inv. Hessian
        int n_iter;
        int max_linesearch;

        float eps;      // convergence tolerance
        float ftol;     // line search tolerance
        float wolfe;
        float min_step;
        float max_step;

        enum ggml_linesearch linesearch;
    } lbfgs;
};

struct ggml_opt_context {
    struct ggml_context * ctx;
    struct ggml_opt_params params;

    int iter;
    int64_t nx; // number of parameter elements

    bool just_initialized;

    float loss_before;
    float loss_after;

    struct {
        struct ggml_tensor * g;  // current gradient
        struct ggml_tensor * m;  // first moment
        struct ggml_tensor * v;  // second moment
        struct ggml_tensor * pf; // past function values
        float fx_best;
        float fx_prev;
        int n_no_improvement;
    } adam;

    struct {
        struct ggml_tensor * x;    // current parameters
        struct ggml_tensor * xp;   // previous parameters
        struct ggml_tensor * g;    // current gradient
        struct ggml_tensor * gp;   // previous gradient
        struct ggml_tensor * d;    // search direction
        struct ggml_tensor * pf;   // past function values
        struct ggml_tensor * lmal; // the L-BFGS memory alpha
        struct ggml_tensor * lmys; // the L-BFGS memory ys
        struct ggml_tensor * lms;  // the L-BFGS memory s
        struct ggml_tensor * lmy;  // the L-BFGS memory y
        float fx_best;
        float step;
        int j;
        int k;
        int end;
        int n_no_improvement;
    } lbfgs;
};

GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

// optimize the function defined by the tensor f
GGML_API enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f);

// initialize optimizer context
GGML_API void ggml_opt_init(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        int64_t nx);

// continue optimizing the function defined by the tensor f
GGML_API enum ggml_opt_result ggml_opt_resume(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f);

// continue optimizing the function defined by the tensor f
GGML_API enum ggml_opt_result ggml_opt_resume_g(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data);
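
// example (editorial sketch): minimizing a scalar loss tensor `f` built in `ctx`,
// assuming ggml_set_param() was already called on the trainable tensors
//
//   struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_ADAM);
//   params.adam.n_iter = 100;
//
//   enum ggml_opt_result res = ggml_opt(ctx, params, f);
//   if (res != GGML_OPT_OK) {
//       // did not converge - inspect the ggml_opt_result value
//   }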

//
// quantization
//

// TODO: these will probably be removed in favor of the more general ggml_quantize_chunk
GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);

GGML_API size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);

GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst,
        int start, int nrows, int n_per_row, int64_t * hist, const float * imatrix);
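
// example (editorial sketch): quantizing an f32 matrix of `nrows` rows of
// `n_per_row` elements each to Q4_0; returns the number of bytes written to dst.
// the histogram buffer size of 16 is an assumption - check the implementation.
//
//   int64_t hist[16] = {0};
//   size_t  n_bytes = ggml_quantize_chunk(GGML_TYPE_Q4_0, src_f32, dst,
//           /*start*/ 0, nrows, n_per_row, hist, /*imatrix*/ NULL);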

// These are needed for IQ2_XS and IQ2_XXS quantizations
GGML_API void ggml_init_iq2_quantization(enum ggml_type type);
GGML_API void ggml_deinit_iq2_quantization(enum ggml_type type);

//
// Importance matrix
//

typedef void (*ggml_collect_imatrix_t)(const struct ggml_tensor * src0, const struct ggml_tensor * src1);
GGML_API void ggml_set_imatrix_collection(ggml_collect_imatrix_t imatrix_collect);

//
// gguf
//

enum gguf_type {
    GGUF_TYPE_UINT8   = 0,
    GGUF_TYPE_INT8    = 1,
    GGUF_TYPE_UINT16  = 2,
    GGUF_TYPE_INT16   = 3,
    GGUF_TYPE_UINT32  = 4,
    GGUF_TYPE_INT32   = 5,
    GGUF_TYPE_FLOAT32 = 6,
    GGUF_TYPE_BOOL    = 7,
    GGUF_TYPE_STRING  = 8,
    GGUF_TYPE_ARRAY   = 9,
    GGUF_TYPE_UINT64  = 10,
    GGUF_TYPE_INT64   = 11,
    GGUF_TYPE_FLOAT64 = 12,
    GGUF_TYPE_COUNT, // marks the end of the enum
};

struct gguf_context;

struct gguf_init_params {
    bool no_alloc;

    // if not NULL, create a ggml_context and allocate the tensor data in it
    struct ggml_context ** ctx;
};

GGML_API struct gguf_context * gguf_init_empty(void);
GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
//GGML_API struct gguf_context * gguf_init_from_buffer(..);

GGML_API void gguf_free(struct gguf_context * ctx);

GGML_API const char * gguf_type_name(enum gguf_type type);

GGML_API int    gguf_get_version    (const struct gguf_context * ctx);
GGML_API size_t gguf_get_alignment  (const struct gguf_context * ctx);
GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx);
GGML_API void * gguf_get_data       (const struct gguf_context * ctx);

GGML_API int          gguf_get_n_kv(const struct gguf_context * ctx);
GGML_API int          gguf_find_key(const struct gguf_context * ctx, const char * key);
GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id);

GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id);
GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id);

// will abort if the wrong type is used for the key
GGML_API uint8_t      gguf_get_val_u8  (const struct gguf_context * ctx, int key_id);
GGML_API int8_t       gguf_get_val_i8  (const struct gguf_context * ctx, int key_id);
GGML_API uint16_t     gguf_get_val_u16 (const struct gguf_context * ctx, int key_id);
GGML_API int16_t      gguf_get_val_i16 (const struct gguf_context * ctx, int key_id);
GGML_API uint32_t     gguf_get_val_u32 (const struct gguf_context * ctx, int key_id);
GGML_API int32_t      gguf_get_val_i32 (const struct gguf_context * ctx, int key_id);
GGML_API float        gguf_get_val_f32 (const struct gguf_context * ctx, int key_id);
GGML_API uint64_t     gguf_get_val_u64 (const struct gguf_context * ctx, int key_id);
GGML_API int64_t      gguf_get_val_i64 (const struct gguf_context * ctx, int key_id);
GGML_API double       gguf_get_val_f64 (const struct gguf_context * ctx, int key_id);
GGML_API bool         gguf_get_val_bool(const struct gguf_context * ctx, int key_id);
GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id);
GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id);
GGML_API int          gguf_get_arr_n   (const struct gguf_context * ctx, int key_id);
GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id);
GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i);

GGML_API int            gguf_get_n_tensors    (const struct gguf_context * ctx);
GGML_API int            gguf_find_tensor      (const struct gguf_context * ctx, const char * name);
GGML_API size_t         gguf_get_tensor_offset(const struct gguf_context * ctx, int i);
GGML_API char *         gguf_get_tensor_name  (const struct gguf_context * ctx, int i);
GGML_API enum ggml_type gguf_get_tensor_type  (const struct gguf_context * ctx, int i);
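
// example (editorial sketch): reading the metadata of a gguf file; the file
// name and key are illustrative only
//
//   struct gguf_init_params params = { /*no_alloc =*/ true, /*ctx =*/ NULL };
//   struct gguf_context * gctx = gguf_init_from_file("model.gguf", params);
//
//   const int n_kv = gguf_get_n_kv(gctx);
//   for (int i = 0; i < n_kv; ++i) {
//       printf("%s: %s\n", gguf_get_key(gctx, i), gguf_type_name(gguf_get_kv_type(gctx, i)));
//   }
//
//   const int kid = gguf_find_key(gctx, "general.architecture");
//   if (kid >= 0) {
//       printf("arch: %s\n", gguf_get_val_str(gctx, kid));
//   }
//
//   gguf_free(gctx);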

// overrides existing values or adds a new one
GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t val);
GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t val);
GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val);
GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val);
GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val);
GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val);
GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t val);
GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double val);
GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val);
GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);

// set or add KV pairs from another context
GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);

// manage tensor info
GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);

// writing gguf files can be done in 2 ways:
//
// - write the entire gguf_context to a binary file in a single pass:
//
//   gguf_write_to_file(ctx, fname);
//
// - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
//
//   FILE * f = fopen(fname, "wb");
//   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
//   fwrite(..., f); // write the tensor data
//   void * data = malloc(gguf_get_meta_size(ctx));
//   gguf_get_meta_data(ctx, data);
//   fseek(f, 0, SEEK_SET);
//   fwrite(data, 1, gguf_get_meta_size(ctx), f);
//   free(data);
//   fclose(f);
//
// write the entire context to a binary file
GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);

// get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);

//
// system info
//

GGML_API int ggml_cpu_has_avx        (void);
GGML_API int ggml_cpu_has_avx_vnni   (void);
GGML_API int ggml_cpu_has_avx2       (void);
GGML_API int ggml_cpu_has_avx512     (void);
GGML_API int ggml_cpu_has_avx512_vbmi(void);
GGML_API int ggml_cpu_has_avx512_vnni(void);
GGML_API int ggml_cpu_has_fma        (void);
GGML_API int ggml_cpu_has_neon       (void);
GGML_API int ggml_cpu_has_arm_fma    (void);
GGML_API int ggml_cpu_has_metal      (void);
GGML_API int ggml_cpu_has_f16c       (void);
GGML_API int ggml_cpu_has_fp16_va    (void);
GGML_API int ggml_cpu_has_wasm_simd  (void);
GGML_API int ggml_cpu_has_blas       (void);
GGML_API int ggml_cpu_has_cublas     (void);
GGML_API int ggml_cpu_has_clblast    (void);
GGML_API int ggml_cpu_has_gpublas    (void);
GGML_API int ggml_cpu_has_sse3       (void);
GGML_API int ggml_cpu_has_ssse3      (void);
GGML_API int ggml_cpu_has_vsx       (void);
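
// example (editorial sketch): each of these returns 0 or 1; they are handy
// for logging build/runtime capabilities at startup
//
//   printf("AVX = %d | AVX2 = %d | NEON = %d | BLAS = %d\n",
//          ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_neon(), ggml_cpu_has_blas());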

//
// Internal types and functions exposed for tests and benchmarks
//

#ifdef __cplusplus
// restrict not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif

typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int k);
typedef void (*ggml_vec_dot_t)   (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

typedef struct {
    const char * type_name;
    int blck_size;
    size_t type_size;
    bool is_quantized;
    ggml_to_float_t   to_float;
    ggml_from_float_t from_float;
    ggml_from_float_t from_float_reference;
    ggml_vec_dot_t    vec_dot;
    enum ggml_type    vec_dot_type;
} ggml_type_traits_t;

GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
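
// example (editorial sketch): dequantizing `n` values from a quantized buffer
// back to F32 via the type traits; assumes `n` is a multiple of blck_size
//
//   ggml_type_traits_t traits = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
//   if (traits.to_float) {
//       traits.to_float(quantized_data, dst_f32, n);
//   }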

#ifdef __cplusplus
}
#endif