#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you wish some specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph gf = ggml_build_forward(f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute_with_ctx(ctx, &gf, n_threads);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory
// buffer and, after defining the computation graph, call the ggml_used_mem() function to find out how much memory
// was actually needed.
//
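// For example, a minimal sketch of this measuring pattern (the buffer size here is just an
// illustrative upper bound):
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 128*1024*1024, // generous upper bound
//           .mem_buffer = NULL,
//       };
//
//       struct ggml_context * ctx = ggml_init(params);
//
//       // ... define tensors and build the computation graph ...
//
//       printf("memory actually needed: %zu bytes\n", ggml_used_mem(ctx));
//
//       ggml_free(ctx);
//   }
//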
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach allows the user to define the function graph once and then compute its forward or backward
// graphs multiple times. All computations will use the same memory buffer allocated in the ggml_init() function.
// This way the user can avoid the memory allocation overhead at runtime.
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated by the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_1s()
//  - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
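// For example, a minimal sketch of computing df/dx for the function defined earlier,
// following the same pattern as the forward example above:
//
//   {
//       ...
//
//       struct ggml_cgraph gf = ggml_build_forward(f);
//       struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
//
//       ggml_graph_compute_with_ctx(ctx, &gf, n_threads);
//
//       ggml_graph_reset(&gf);
//       ggml_set_f32(f->grad, 1.0f); // seed the adjoint of the output
//
//       ggml_graph_compute_with_ctx(ctx, &gb, n_threads);
//
//       printf("df/dx = %f\n", ggml_get_f32_1d(x->grad, 0));
//
//       ...
//   }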
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This makes it
// possible to store tensors that are not contiguous in memory, which is useful for operations such as transposition
// and permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
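// For example, for a contiguous 2x3 F32 tensor (ne = [2, 3, 1, 1]), the strides work out to
// (assuming no padding):
//
//   nb[0] = 4   // sizeof(float)
//   nb[1] = 8   // nb[0]*ne[0]
//   nb[2] = 24  // nb[1]*ne[1]
//   nb[3] = 24  // nb[2]*ne[2]
//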
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3);
//
//       // a[2, 1] = 1.0f;
//       *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f;
//
//       // a[0, 2] = 2.0f;
//       *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f;
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d(), that can be used.
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_QNT_VERSION        2    // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this

#define GGML_MAX_DIMS          4
#define GGML_MAX_NODES         4096
#define GGML_MAX_PARAMS        256
#define GGML_MAX_CONTEXTS      64
#define GGML_MAX_SRC           6
#define GGML_MAX_NAME          48
#define GGML_MAX_OP_PARAMS     32
#define GGML_DEFAULT_N_THREADS 4

#define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1

#define GGML_UNUSED(x) (void)(x)

#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)

// used to copy the number of elements and stride in bytes of tensors into local variables.
// main purpose is to reduce code duplication and improve readability.
//
// example:
//
//   GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
//   GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
//
#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
    const type prefix##0 = (pointer)->array[0]; \
    GGML_UNUSED(prefix##0);
#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
    const type prefix##1 = (pointer)->array[1]; \
    GGML_UNUSED(prefix##1);
#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
    const type prefix##2 = (pointer)->array[2]; \
    GGML_UNUSED(prefix##2);
#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
    const type prefix##3 = (pointer)->array[3]; \
    GGML_UNUSED(prefix##3);
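
// for reference, GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) expands to:
//
//   const int64_t ne10 = (src1)->ne[0]; (void)(ne10);
//   const int64_t ne11 = (src1)->ne[1]; (void)(ne11);
//   const int64_t ne12 = (src1)->ne[2]; (void)(ne12);
//   const int64_t ne13 = (src1)->ne[3]; (void)(ne13);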

#ifdef  __cplusplus
extern "C" {
#endif

#ifdef __ARM_NEON
    // we use the built-in 16-bit float type
    typedef __fp16 ggml_fp16_t;
#else
    typedef uint16_t ggml_fp16_t;
#endif

    // convert FP16 <-> FP32
    GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
    GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);
    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);
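
    // for example, a small sketch converting a row of FP32 values to FP16:
    //
    //   float       src[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    //   ggml_fp16_t dst[4];
    //
    //   ggml_fp32_to_fp16_row(src, dst, 4);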

    struct ggml_object;
    struct ggml_context;

    enum ggml_type {
        GGML_TYPE_F32  = 0,
        GGML_TYPE_F16  = 1,
        GGML_TYPE_Q4_0 = 2,
        GGML_TYPE_Q4_1 = 3,
        // GGML_TYPE_Q4_2 = 4, support has been removed
        // GGML_TYPE_Q4_3 (5) support has been removed
        GGML_TYPE_Q5_0 = 6,
        GGML_TYPE_Q5_1 = 7,
        GGML_TYPE_Q8_0 = 8,
        GGML_TYPE_Q8_1 = 9,
        // k-quantizations
        GGML_TYPE_Q2_K = 10,
        GGML_TYPE_Q3_K = 11,
        GGML_TYPE_Q4_K = 12,
        GGML_TYPE_Q5_K = 13,
        GGML_TYPE_Q6_K = 14,
        GGML_TYPE_Q8_K = 15,
        GGML_TYPE_I8,
        GGML_TYPE_I16,
        GGML_TYPE_I32,
        GGML_TYPE_COUNT,
    };

    enum ggml_backend {
        GGML_BACKEND_CPU       = 0,
        GGML_BACKEND_GPU       = 10,
        GGML_BACKEND_GPU_SPLIT = 20,
    };

    // model file types
    enum ggml_ftype {
        GGML_FTYPE_UNKNOWN              = -1,
        GGML_FTYPE_ALL_F32              = 0,
        GGML_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
        GGML_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q3_K          = 11, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_K          = 12, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_K          = 13, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q6_K          = 14, // except 1d tensors
    };

    // available tensor operations:
    enum ggml_op {
        GGML_OP_NONE = 0,

        GGML_OP_DUP,
        GGML_OP_ADD,
        GGML_OP_ADD1,
        GGML_OP_ACC,
        GGML_OP_SUB,
        GGML_OP_MUL,
        GGML_OP_DIV,
        GGML_OP_SQR,
        GGML_OP_SQRT,
        GGML_OP_LOG,
        GGML_OP_SUM,
        GGML_OP_SUM_ROWS,
        GGML_OP_MEAN,
        GGML_OP_ARGMAX,
        GGML_OP_REPEAT,
        GGML_OP_REPEAT_BACK,
        GGML_OP_SILU_BACK,
        GGML_OP_NORM, // normalize
        GGML_OP_RMS_NORM,
        GGML_OP_RMS_NORM_BACK,

        GGML_OP_MUL_MAT,
        GGML_OP_OUT_PROD,

        GGML_OP_SCALE,
        GGML_OP_SET,
        GGML_OP_CPY,
        GGML_OP_CONT,
        GGML_OP_RESHAPE,
        GGML_OP_VIEW,
        GGML_OP_PERMUTE,
        GGML_OP_TRANSPOSE,
        GGML_OP_GET_ROWS,
        GGML_OP_GET_ROWS_BACK,
        GGML_OP_DIAG,
        GGML_OP_DIAG_MASK_INF,
        GGML_OP_DIAG_MASK_ZERO,
        GGML_OP_SOFT_MAX,
        GGML_OP_SOFT_MAX_BACK,
        GGML_OP_ROPE,
        GGML_OP_ROPE_BACK,
        GGML_OP_ALIBI,
        GGML_OP_CLAMP,
        GGML_OP_CONV_1D,
        GGML_OP_CONV_2D,
        GGML_OP_POOL_1D,
        GGML_OP_POOL_2D,

        GGML_OP_FLASH_ATTN,
        GGML_OP_FLASH_FF,
        GGML_OP_FLASH_ATTN_BACK,
        GGML_OP_WIN_PART,
        GGML_OP_WIN_UNPART,

        GGML_OP_UNARY,

        GGML_OP_MAP_UNARY,
        GGML_OP_MAP_BINARY,

        GGML_OP_MAP_CUSTOM1,
        GGML_OP_MAP_CUSTOM2,
        GGML_OP_MAP_CUSTOM3,

        GGML_OP_CROSS_ENTROPY_LOSS,
        GGML_OP_CROSS_ENTROPY_LOSS_BACK,

        GGML_OP_COUNT,
    };

    enum ggml_unary_op {
        GGML_UNARY_OP_ABS,
        GGML_UNARY_OP_SGN,
        GGML_UNARY_OP_NEG,
        GGML_UNARY_OP_STEP,
        GGML_UNARY_OP_TANH,
        GGML_UNARY_OP_ELU,
        GGML_UNARY_OP_RELU,
        GGML_UNARY_OP_GELU,
        GGML_UNARY_OP_GELU_QUICK,
        GGML_UNARY_OP_SILU,
    };

    // ggml object
    struct ggml_object {
        size_t offs;
        size_t size;

        struct ggml_object * next;

        char padding[8];
    };

    static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

    // n-dimensional tensor
    struct ggml_tensor {
        enum ggml_type    type;
        enum ggml_backend backend;

        int     n_dims;
        int64_t ne[GGML_MAX_DIMS]; // number of elements
        size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                                   // nb[0] = sizeof(type)
                                   // nb[1] = nb[0]   * ne[0] + padding
                                   // nb[i] = nb[i-1] * ne[i-1]

        // compute data
        enum ggml_op op;

        // op params - allocated as int32_t for alignment
        int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(uint32_t)];

        bool is_param;

        struct ggml_tensor * grad;
        struct ggml_tensor * src[GGML_MAX_SRC];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;

        void * data;

        char name[GGML_MAX_NAME];

        void * extra; // extra things e.g. for ggml-cuda.cu

        char padding[4];
    };

    static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);

    // the compute plan that needs to be prepared for ggml_graph_compute()
    // since https://github.com/ggerganov/ggml/issues/287
    struct ggml_cplan {
        size_t    work_size; // size of work buffer, calculated by `ggml_graph_plan()`
        uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()`

        int n_threads;

        // the `n_tasks` of nodes, 1:1 mapping to cgraph nodes
        int n_tasks[GGML_MAX_NODES];

        // abort ggml_graph_compute when true
        bool (*abort_callback)(void * data);
        void * abort_callback_data;
    };

    // next prime after GGML_MAX_NODES
    // #define GGML_GRAPH_HASHTABLE_SIZE 4099
    // next prime after GGML_MAX_NODES * 2 (nodes + leafs)
    #define GGML_GRAPH_HASHTABLE_SIZE 8273

    // computation graph
    struct ggml_cgraph {
        int n_nodes;
        int n_leafs;

        struct ggml_tensor * nodes[GGML_MAX_NODES];
        struct ggml_tensor * grads[GGML_MAX_NODES];
        struct ggml_tensor * leafs[GGML_MAX_NODES];

        void * visited_hash_table[GGML_GRAPH_HASHTABLE_SIZE];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;
    };

    // scratch buffer
    struct ggml_scratch {
        size_t offs;
        size_t size;
        void * data;
    };

    struct ggml_init_params {
        // memory pool
        size_t mem_size;   // bytes
        void * mem_buffer; // if NULL, memory will be allocated internally
        bool   no_alloc;   // don't allocate memory for the tensor data
    };

    // compute types

    // NOTE: the INIT or FINALIZE pass is not scheduled unless explicitly enabled.
    // This behavior was changed since https://github.com/ggerganov/llama.cpp/pull/1995.
    enum ggml_task_type {
        GGML_TASK_INIT = 0,
        GGML_TASK_COMPUTE,
        GGML_TASK_FINALIZE,
    };

    struct ggml_compute_params {
        enum ggml_task_type type;

        // ith = thread index, nth = number of threads
        int ith, nth;

        // work buffer for all threads
        size_t wsize;
        void * wdata;
    };

    // misc

    GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
    GGML_API int64_t ggml_time_ms(void);
    GGML_API int64_t ggml_time_us(void);
    GGML_API int64_t ggml_cycles(void);
    GGML_API int64_t ggml_cycles_per_ms(void);

    GGML_API void    ggml_numa_init(void); // call once for better performance on NUMA systems
    GGML_API bool    ggml_is_numa(void);   // true if init detected that system has >1 NUMA node

    GGML_API void    ggml_print_object (const struct ggml_object * obj);
    GGML_API void    ggml_print_objects(const struct ggml_context * ctx);

    GGML_API int64_t ggml_nelements   (const struct ggml_tensor * tensor);
    GGML_API int64_t ggml_nrows       (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes      (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split);

    GGML_API int     ggml_blck_size (enum ggml_type type);
    GGML_API size_t  ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
    GGML_API float   ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float

    GGML_API const char * ggml_type_name(enum ggml_type type);
    GGML_API const char * ggml_op_name  (enum ggml_op   op);
    GGML_API const char * ggml_op_symbol(enum ggml_op   op);

    GGML_API size_t  ggml_element_size(const struct ggml_tensor * tensor);

    GGML_API bool    ggml_is_quantized(enum ggml_type type);

    // TODO: temporary until model loading of ggml examples is refactored
    GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

    GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_permuted  (const struct ggml_tensor * tensor);

    // use this to compute the memory overhead of a tensor
    GGML_API size_t ggml_tensor_overhead(void);

    // main

    GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
    GGML_API void                  ggml_free(struct ggml_context * ctx);

    GGML_API size_t  ggml_used_mem(const struct ggml_context * ctx);

    GGML_API size_t  ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
    GGML_API bool    ggml_get_no_alloc(struct ggml_context * ctx);
    GGML_API void    ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

    GGML_API void *  ggml_get_mem_buffer     (const struct ggml_context * ctx);
    GGML_API size_t  ggml_get_mem_size       (const struct ggml_context * ctx);
    GGML_API size_t  ggml_get_max_tensor_size(const struct ggml_context * ctx);

    GGML_API struct ggml_tensor * ggml_new_tensor(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int    n_dims,
            const int64_t *ne);

    GGML_API struct ggml_tensor * ggml_new_tensor_1d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0);

    GGML_API struct ggml_tensor * ggml_new_tensor_2d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0,
            int64_t ne1);

    GGML_API struct ggml_tensor * ggml_new_tensor_3d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0,
            int64_t ne1,
            int64_t ne2);

    GGML_API struct ggml_tensor * ggml_new_tensor_4d(
            struct ggml_context * ctx,
            enum   ggml_type type,
            int64_t ne0,
            int64_t ne1,
            int64_t ne2,
            int64_t ne3);

    GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
    GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

    GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
    GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);

    GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);

    GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

    GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

    GGML_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

    GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
    GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

    GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);

    GGML_API const char *         ggml_get_name   (const struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_name   (      struct ggml_tensor * tensor, const char * name);
    GGML_API struct ggml_tensor * ggml_format_name(      struct ggml_tensor * tensor, const char * fmt, ...);

    //
    // operations on tensors with backpropagation
    //

    GGML_API struct ggml_tensor * ggml_dup(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_dup_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_add(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_acc(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_acc_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_sub(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sub_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sqr(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqr_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return scalar
    GGML_API struct ggml_tensor * ggml_sum(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // sums along rows: with input shape [a,b,c,d], the return shape is [1,b,c,d]
    GGML_API struct ggml_tensor * ggml_sum_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // mean along rows
    GGML_API struct ggml_tensor * ggml_mean(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // argmax along rows
    GGML_API struct ggml_tensor * ggml_argmax(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // if a is the same shape as b, and a is not a parameter, return a
    // otherwise, return a new tensor: repeat(a) to fit in b
    GGML_API struct ggml_tensor * ggml_repeat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_repeat_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_abs(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_abs_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // TODO: double-check this computation is correct
    GGML_API struct ggml_tensor * ggml_gelu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_silu_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // normalize along rows
    // TODO: eps is hardcoded to 1e-5 for now
    GGML_API struct ggml_tensor * ggml_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_rms_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    // a - x
    // b - dy
    // TODO: update with configurable eps
    GGML_API struct ggml_tensor * ggml_rms_norm_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // A: n columns, m rows
    // B: n columns, p rows (i.e. we transpose it internally)
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_mul_mat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
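
    // for example, following the shape comment above (in ggml, ne[0] is the row length,
    // i.e. the number of columns): with a->ne = [n, m] and b->ne = [n, p],
    // ggml_mul_mat(ctx, a, b) produces a tensor with ne = [m, p]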

    // A: m columns, n rows,
    // B: p columns, n rows,
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_out_prod(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    //
    // operations on tensors without backpropagation
    //

    GGML_API struct ggml_tensor * ggml_scale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_scale_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // b -> view(a, offset, nb1, nb2, nb3), return modified a
    GGML_API struct ggml_tensor * ggml_set(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    // b -> view(a, offset, nb1, nb2, nb3), return view(a)
    GGML_API struct ggml_tensor * ggml_set_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    // b -> view(a, offset, nb1), return modified a
    GGML_API struct ggml_tensor * ggml_set_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // b -> view(a, offset, nb1), return view(a)
    GGML_API struct ggml_tensor * ggml_set_2d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // a -> b, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // a -> b, in-place, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // make contiguous
    GGML_API struct ggml_tensor * ggml_cont(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // make contiguous, in-place
    GGML_API struct ggml_tensor * ggml_cont_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return view(a), b specifies the new shape
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_reshape_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_reshape_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    // offset in bytes
    GGML_API struct ggml_tensor * ggml_view_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            size_t                nb1, // row stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_permute(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   axis0,
            int                   axis1,
            int                   axis2,
            int                   axis3);

    // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
    GGML_API struct ggml_tensor * ggml_transpose(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_get_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_get_rows_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    GGML_API struct ggml_tensor * ggml_diag(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // set elements above the diagonal to -INF
    GGML_API struct ggml_tensor * ggml_diag_mask_inf(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // set elements above the diagonal to 0
    GGML_API struct ggml_tensor * ggml_diag_mask_zero(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    GGML_API struct ggml_tensor * ggml_soft_max(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_soft_max_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // rotary position embedding
    // if mode & 1 != 0, skip n_past elements
    // if mode & 2 != 0, GPT-NeoX style
    // if mode & 4 != 0, ChatGLM style
    // TODO: avoid creating a new tensor every time
    GGML_API struct ggml_tensor * ggml_rope(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // custom RoPE, in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            float                 freq_base,
            float                 freq_scale);

    // rotary position embedding backward, i.e. compute dx from dy
    // a - dy
    GGML_API struct ggml_tensor * ggml_rope_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // alibi position embedding
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_alibi(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_head,
            float                 bias_max);

    // clamp
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_clamp(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 min,
            float                 max);

    GGML_API struct ggml_tensor * ggml_conv_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,  // stride
            int                   p0,  // padding
            int                   d0); // dilation

    GGML_API struct ggml_tensor * ggml_conv_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1);

    // conv_1d with padding = half
    // alias for ggml_conv_1d(ctx, a, b, s, a->ne[0]/2, d)
    GGML_API struct ggml_tensor * ggml_conv_1d_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s,
            int                   d);

    enum ggml_op_pool {
        GGML_OP_POOL_MAX,
        GGML_OP_POOL_AVG,
        GGML_OP_POOL_COUNT,
    };

    GGML_API struct ggml_tensor * ggml_pool_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0,  // kernel size
            int                   s0,  // stride
            int                   p0); // padding

    GGML_API struct ggml_tensor * ggml_pool_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0,
            int                   k1,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1);

    GGML_API struct ggml_tensor * ggml_flash_attn(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_attn_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * d,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_ff(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b0,
            struct ggml_tensor  * b1,
            struct ggml_tensor  * c0,
            struct ggml_tensor  * c1);

    // partition into non-overlapping windows with padding if needed
    // example:
    //   a:   768 64 64 1
    //   w:   14
    //   res: 768 14 14 25
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_part(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w);

    // reverse of ggml_win_part
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_unpart(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w0,
            int                   h0,
            int                   w);

    // custom operators

    typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
    typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

    typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
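
    // for example, a sketch of a custom element-wise operator (the function name is
    // illustrative, and the (n, dst, src) argument order is an assumption based on how
    // these callbacks are invoked):
    //
    //   static void my_double_f32(const int n, float * dst, const float * src) {
    //       for (int i = 0; i < n; ++i) {
    //           dst[i] = 2.0f*src[i];
    //       }
    //   }
    //
    //   struct ggml_tensor * b = ggml_map_unary_f32(ctx, a, my_double_f32);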

    GGML_API struct ggml_tensor * ggml_unary(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    GGML_API struct ggml_tensor * ggml_unary_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    GGML_API struct ggml_tensor * ggml_map_unary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun);

    GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun);

    GGML_API struct ggml_tensor * ggml_map_binary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun);

    GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun);

    GGML_API struct ggml_tensor * ggml_map_custom1_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom2_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom3_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_f32_t fun);

    // loss function

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    //
    // automatic differentiation
    //

    GGML_API void ggml_set_param(
            struct ggml_context * ctx,
            struct ggml_tensor  * tensor);

    GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);

    GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
    GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);

    // ggml_graph_plan() has to be called before ggml_graph_compute()
    // when plan.work_size > 0, caller must allocate memory for plan.work_data
    GGML_API struct ggml_cplan ggml_graph_plan   (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
    GGML_API int               ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
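
    // for example, a minimal sketch of the plan/compute flow (assuming malloc/free from <stdlib.h>):
    //
    //   struct ggml_cplan plan = ggml_graph_plan(&gf, GGML_DEFAULT_N_THREADS);
    //
    //   if (plan.work_size > 0) {
    //       plan.work_data = malloc(plan.work_size);
    //   }
    //
    //   ggml_graph_compute(&gf, &plan);
    //
    //   free(plan.work_data);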

    GGML_API void              ggml_graph_reset  (struct ggml_cgraph * cgraph);

    // same as ggml_graph_compute() but the work data is allocated as a part of the context
    // note: the drawback of this API is that you must ensure that the context has enough memory for the work data
    GGML_API void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);

    GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);

    GGML_API void               ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
    GGML_API struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);

    // print info and performance information for the graph
    GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

    // dump the graph into a file using the dot format
    GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);

    //
    // optimization
    //

    // optimization methods
    enum ggml_opt_type {
        GGML_OPT_ADAM,
        GGML_OPT_LBFGS,
    };

    // linesearch methods
    enum ggml_linesearch {
        GGML_LINESEARCH_DEFAULT = 1,

        GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
        GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
        GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
    };

    // optimization return values
    enum ggml_opt_result {
        GGML_OPT_OK = 0,
        GGML_OPT_DID_NOT_CONVERGE,
        GGML_OPT_NO_CONTEXT,
        GGML_OPT_INVALID_WOLFE,
        GGML_OPT_FAIL,

        GGML_LINESEARCH_FAIL = -128,
        GGML_LINESEARCH_MINIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_ITERATIONS,
        GGML_LINESEARCH_INVALID_PARAMETERS,
    };

    // optimization parameters
    //
    //   see ggml.c (ggml_opt_default_params) for default values
    //
    struct ggml_opt_params {
        enum ggml_opt_type type;

        int n_threads;

        // delta-based convergence test
        //
        //   if past == 0 - disabled
        //   if past > 0:
        //     stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
        //
        int past;
        float delta;

        // maximum number of iterations without improvement
        //
        //   if 0 - disabled
        //   if > 0:
        //     assume convergence if no cost improvement in this number of iterations
        //
        int max_no_improvement;

        bool print_forward_graph;
        bool print_backward_graph;

        // ADAM parameters
        struct {
            int n_iter;

            float sched; // schedule multiplier (fixed, decay or warmup)
            float decay; // weight decay for AdamW, use 0.0f to disable
            float alpha; // learning rate
            float beta1;
            float beta2;
            float eps;   // epsilon for numerical stability
            float eps_f; // epsilon for convergence test
            float eps_g; // epsilon for convergence test
        } adam;

        // LBFGS parameters
        struct {
            int m; // number of corrections to approximate the inv. Hessian
            int n_iter;
            int max_linesearch;

            float eps;  // convergence tolerance
            float ftol; // line search tolerance
            float wolfe;
            float min_step;
            float max_step;

            enum ggml_linesearch linesearch;
        } lbfgs;
    };

    struct ggml_opt_context {
        struct ggml_context * ctx;
        struct ggml_opt_params params;

        int iter;
        int64_t nx; // number of parameter elements

        bool just_initialized;

        struct {
            struct ggml_tensor * x;  // view of the parameters
            struct ggml_tensor * g1; // gradient
            struct ggml_tensor * g2; // gradient squared
            struct ggml_tensor * m;  // first moment
            struct ggml_tensor * v;  // second moment
            struct ggml_tensor * mh; // first moment hat
            struct ggml_tensor * vh; // second moment hat
            struct ggml_tensor * pf; // past function values
            float fx_best;
            float fx_prev;
            int n_no_improvement;
        } adam;

        struct {
            struct ggml_tensor * x;    // current parameters
            struct ggml_tensor * xp;   // previous parameters
            struct ggml_tensor * g;    // current gradient
            struct ggml_tensor * gp;   // previous gradient
            struct ggml_tensor * d;    // search direction
            struct ggml_tensor * pf;   // past function values
            struct ggml_tensor * lmal; // the L-BFGS memory alpha
            struct ggml_tensor * lmys; // the L-BFGS memory ys
            struct ggml_tensor * lms;  // the L-BFGS memory s
            struct ggml_tensor * lmy;  // the L-BFGS memory y
            float fx_best;
            float step;
            int j;
            int k;
            int end;
            int n_no_improvement;
        } lbfgs;
    };

    GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

    // optimize the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt(
            struct ggml_context * ctx,
            struct ggml_opt_params params,
            struct ggml_tensor * f);
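
    // for example, a minimal sketch (f is a scalar tensor built from tensors previously
    // marked with ggml_set_param()):
    //
    //   struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
    //
    //   enum ggml_opt_result res = ggml_opt(ctx, opt_params, f);
    //   if (res != GGML_OPT_OK) {
    //       // handle non-convergence
    //   }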

    // initialize optimizer context
    GGML_API void ggml_opt_init(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_opt_params params,
            int64_t nx);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume_g(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f,
            struct ggml_cgraph * gf,
            struct ggml_cgraph * gb);

    //
    // quantization
    //

    GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);

    GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
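
    // for example, a sketch of quantizing an FP32 buffer to Q4_0; the meaning of the
    // parameters (n = total number of elements, k = row size, hist = a 16-bucket
    // histogram of quantized values) is an assumption based on typical callers:
    //
    //   int64_t hist[16] = {0};
    //
    //   // returns the number of bytes written to dst
    //   size_t dst_size = ggml_quantize_q4_0(src_f32, dst, n, k, hist);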

    //
    // system info
    //

    GGML_API int ggml_cpu_has_avx        (void);
    GGML_API int ggml_cpu_has_avx2       (void);
    GGML_API int ggml_cpu_has_avx512     (void);
    GGML_API int ggml_cpu_has_avx512_vbmi(void);
    GGML_API int ggml_cpu_has_avx512_vnni(void);
    GGML_API int ggml_cpu_has_fma        (void);
    GGML_API int ggml_cpu_has_neon       (void);
    GGML_API int ggml_cpu_has_arm_fma    (void);
    GGML_API int ggml_cpu_has_f16c       (void);
    GGML_API int ggml_cpu_has_fp16_va    (void);
    GGML_API int ggml_cpu_has_wasm_simd  (void);
    GGML_API int ggml_cpu_has_blas       (void);
    GGML_API int ggml_cpu_has_cublas     (void);
    GGML_API int ggml_cpu_has_clblast    (void);
    GGML_API int ggml_cpu_has_gpublas    (void);
    GGML_API int ggml_cpu_has_sse3       (void);
    GGML_API int ggml_cpu_has_vsx        (void);

    //
    // Internal types and functions exposed for tests and benchmarks
    //

#ifdef  __cplusplus
    // restrict not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif
    typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int k);
    typedef void (*ggml_vec_dot_t)   (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

    typedef struct {
        ggml_to_float_t   to_float;
        ggml_from_float_t from_float;
        ggml_from_float_t from_float_reference;
        ggml_vec_dot_t    vec_dot;
        enum ggml_type    vec_dot_type;
    } ggml_type_traits_t;

    ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type i);
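
    // for example, a sketch of converting quantized data back to FP32 through the traits
    // (n is the number of elements, assumed to be a multiple of the type's block size):
    //
    //   ggml_type_traits_t traits = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
    //
    //   traits.to_float(quantized_data, f32_out, n);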

#ifdef  __cplusplus
}
#endif