#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you would like specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph gf = ggml_build_forward(f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute(ctx, &gf);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough buffer
// and after defining the computation graph, call the ggml_used_mem() function to find out how much memory was
// actually needed.
//
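// For instance, a minimal sketch of this measurement (the buffer size here is deliberately generous and made up
// for illustration):
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 128*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       struct ggml_context * ctx = ggml_init(params);
//
//       // ... define the computation graph here ...
//
//       // query how much of the buffer was actually used - a future run
//       // can be initialized with a buffer of exactly this size
//       printf("used memory: %zu bytes\n", ggml_used_mem(ctx));
//
//       ggml_free(ctx);
//   }
//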
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach allows the user to define the function graph once and then compute its forward or backward
// graphs multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This
// way the user can avoid the memory allocation overhead at runtime.
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated in the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_s1_ph()
//  - ggml_conv_1d_s2_ph()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This makes it
// possible to store tensors that are not contiguous in memory, which is useful for operations such as transposition
// and permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3);
//
//       // a[1, 2] = 1.0f;
//       *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f;
//
//       // a[2, 0] = 2.0f;
//       *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f;
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d() that can be used.
//
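// For example, a sketch using the 1d helpers on the contiguous tensor above (the index is flattened,
// row-major, so element a[1, 2] has index 2*a->ne[0] + 1):
//
//   {
//       ggml_set_f32_1d(a, 2*a->ne[0] + 1, 1.0f);        // same element as a[1, 2] above
//       float v = ggml_get_f32_1d(a, 2*a->ne[0] + 1);    // v == 1.0f
//   }
//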
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>  // fprintf, used by GGML_ASSERT
#include <stdlib.h> // abort, used by GGML_ASSERT

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_QNT_VERSION        2    // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this

#define GGML_MAX_DIMS          4
#define GGML_MAX_NODES         4096
#define GGML_MAX_PARAMS        256
#define GGML_MAX_CONTEXTS      64
#define GGML_MAX_OPT           4
#define GGML_MAX_NAME          48
#define GGML_DEFAULT_N_THREADS 4

#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)

#ifdef  __cplusplus
extern "C" {
#endif

#ifdef __ARM_NEON
    // we use the built-in 16-bit float type
    typedef __fp16 ggml_fp16_t;
#else
    typedef uint16_t ggml_fp16_t;
#endif

    // convert FP16 <-> FP32
    GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
    GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, size_t n);
    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, size_t n);
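
    // example - round-trip a value through FP16 (a minimal sketch):
    //
    //   ggml_fp16_t h = ggml_fp32_to_fp16(1.5f);
    //   float       f = ggml_fp16_to_fp32(h); // 1.5f is exactly representable in FP16, so f == 1.5f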

    struct ggml_object;
    struct ggml_context;

    enum ggml_type {
        GGML_TYPE_F32  = 0,
        GGML_TYPE_F16  = 1,
        GGML_TYPE_Q4_0 = 2,
        GGML_TYPE_Q4_1 = 3,
        // GGML_TYPE_Q4_2 = 4, support has been removed
        // GGML_TYPE_Q4_3 = 5, support has been removed
        GGML_TYPE_Q5_0 = 6,
        GGML_TYPE_Q5_1 = 7,
        GGML_TYPE_Q8_0 = 8,
        GGML_TYPE_Q8_1 = 9,
        // k-quantizations
        GGML_TYPE_Q2_K = 10,
        GGML_TYPE_Q3_K = 11,
        GGML_TYPE_Q4_K = 12,
        GGML_TYPE_Q5_K = 13,
        GGML_TYPE_Q6_K = 14,
        GGML_TYPE_Q8_K = 15,
        GGML_TYPE_I8,
        GGML_TYPE_I16,
        GGML_TYPE_I32,
        GGML_TYPE_COUNT,
    };

    enum ggml_backend {
        GGML_BACKEND_CPU       = 0,
        GGML_BACKEND_GPU       = 10,
        GGML_BACKEND_GPU_SPLIT = 20,
    };

    // model file types
    enum ggml_ftype {
        GGML_FTYPE_UNKNOWN              = -1,
        GGML_FTYPE_ALL_F32              = 0,
        GGML_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
        GGML_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q3_K          = 11, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_K          = 12, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_K          = 13, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q6_K          = 14, // except 1d tensors
    };

    // available tensor operations:
    enum ggml_op {
        GGML_OP_NONE = 0,

        GGML_OP_DUP,
        GGML_OP_ADD,
        GGML_OP_ADD1,
        GGML_OP_ACC,
        GGML_OP_SUB,
        GGML_OP_MUL,
        GGML_OP_DIV,
        GGML_OP_SQR,
        GGML_OP_SQRT,
        GGML_OP_LOG,
        GGML_OP_SUM,
        GGML_OP_SUM_ROWS,
        GGML_OP_MEAN,
        GGML_OP_REPEAT,
        GGML_OP_REPEAT_BACK,
        GGML_OP_ABS,
        GGML_OP_SGN,
        GGML_OP_NEG,
        GGML_OP_STEP,
        GGML_OP_RELU,
        GGML_OP_GELU,
        GGML_OP_GELU_QUICK,
        GGML_OP_SILU,
        GGML_OP_SILU_BACK,
        GGML_OP_NORM, // normalize
        GGML_OP_RMS_NORM,
        GGML_OP_RMS_NORM_BACK,

        GGML_OP_MUL_MAT,
        GGML_OP_OUT_PROD,

        GGML_OP_SCALE,
        GGML_OP_SET,
        GGML_OP_CPY,
        GGML_OP_CONT,
        GGML_OP_RESHAPE,
        GGML_OP_VIEW,
        GGML_OP_PERMUTE,
        GGML_OP_TRANSPOSE,
        GGML_OP_GET_ROWS,
        GGML_OP_GET_ROWS_BACK,
        GGML_OP_DIAG,
        GGML_OP_DIAG_MASK_INF,
        GGML_OP_DIAG_MASK_ZERO,
        GGML_OP_SOFT_MAX,
        GGML_OP_SOFT_MAX_BACK,
        GGML_OP_ROPE,
        GGML_OP_ROPE_BACK,
        GGML_OP_ALIBI,
        GGML_OP_CLAMP,
        GGML_OP_CONV_1D_S1_PH,
        GGML_OP_CONV_1D_S2_PH,
        GGML_OP_CONV_2D_SK_P0,

        GGML_OP_FLASH_ATTN,
        GGML_OP_FLASH_FF,
        GGML_OP_FLASH_ATTN_BACK,
        GGML_OP_WIN_PART,
        GGML_OP_WIN_UNPART,

        GGML_OP_MAP_UNARY,
        GGML_OP_MAP_BINARY,

        GGML_OP_MAP_CUSTOM1,
        GGML_OP_MAP_CUSTOM2,
        GGML_OP_MAP_CUSTOM3,

        GGML_OP_CROSS_ENTROPY_LOSS,
        GGML_OP_CROSS_ENTROPY_LOSS_BACK,

        GGML_OP_COUNT,
    };

    // ggml object
    struct ggml_object {
        size_t offs;
        size_t size;

        struct ggml_object * next;

        char padding[8];
    };

    static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

    // n-dimensional tensor
    struct ggml_tensor {
        enum ggml_type    type;
        enum ggml_backend backend;

        int     n_dims;
        int64_t ne[GGML_MAX_DIMS]; // number of elements
        size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                                   // nb[0] = sizeof(type)
                                   // nb[1] = nb[0]   * ne[0] + padding
                                   // nb[i] = nb[i-1] * ne[i-1]
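                                   //
                                   // e.g. a contiguous F32 tensor with ne = [4, 3, 1, 1] has
                                   // nb = [4, 16, 48, 48] (a worked example; assumes no padding)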

        // compute data
        enum ggml_op op;

        bool is_param;

        struct ggml_tensor * grad;
        struct ggml_tensor * src0;
        struct ggml_tensor * src1;
        struct ggml_tensor * opt[GGML_MAX_OPT];

        // thread scheduling
        int n_tasks;

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;

        void * data;

        char name[GGML_MAX_NAME];

        void * extra; // extra things e.g. for ggml-cuda.cu

        char padding[4];
    };

    static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);

    // computation graph
    struct ggml_cgraph {
        int n_nodes;
        int n_leafs;
        int n_threads;

        size_t work_size;
        struct ggml_tensor * work;

        struct ggml_tensor * nodes[GGML_MAX_NODES];
        struct ggml_tensor * grads[GGML_MAX_NODES];
        struct ggml_tensor * leafs[GGML_MAX_NODES];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;
    };

    // scratch buffer
    struct ggml_scratch {
        size_t offs;
        size_t size;
        void * data;
    };

    struct ggml_init_params {
        // memory pool
        size_t mem_size;   // bytes
        void * mem_buffer; // if NULL, memory will be allocated internally
        bool   no_alloc;   // don't allocate memory for the tensor data
    };

    // compute types

    // NOTE: the INIT or FINALIZE pass is not scheduled unless explicitly enabled.
    // This behavior was changed in https://github.com/ggerganov/llama.cpp/pull/1995.
    enum ggml_task_type {
        GGML_TASK_INIT = 0,
        GGML_TASK_COMPUTE,
        GGML_TASK_FINALIZE,
    };

    struct ggml_compute_params {
        enum ggml_task_type type;

        // ith = thread index, nth = number of threads
        int ith, nth;

        // work buffer for all threads
        size_t wsize;
        void * wdata;
    };

    // misc

    GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
    GGML_API int64_t ggml_time_ms(void);
    GGML_API int64_t ggml_time_us(void);
    GGML_API int64_t ggml_cycles(void);
    GGML_API int64_t ggml_cycles_per_ms(void);

    GGML_API void    ggml_numa_init(void); // call once for better performance on NUMA systems
    GGML_API bool    ggml_is_numa(void);   // true if init detected that system has >1 NUMA node

    GGML_API void    ggml_print_object (const struct ggml_object * obj);
    GGML_API void    ggml_print_objects(const struct ggml_context * ctx);

    GGML_API int64_t ggml_nelements   (const struct ggml_tensor * tensor);
    GGML_API int64_t ggml_nrows       (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes      (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split);

    GGML_API int     ggml_blck_size (enum ggml_type type);
    GGML_API size_t  ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
    GGML_API float   ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float

    GGML_API const char * ggml_type_name(enum ggml_type type);
    GGML_API const char * ggml_op_name  (enum ggml_op   op);

    GGML_API size_t  ggml_element_size(const struct ggml_tensor * tensor);

    GGML_API bool    ggml_is_quantized(enum ggml_type type);

    // TODO: temporary until model loading of ggml examples is refactored
    GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

    GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_permuted  (const struct ggml_tensor * tensor);

    // use this to compute the memory overhead of a tensor
    GGML_API size_t ggml_tensor_overhead(void);

    // main

    GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
    GGML_API void                  ggml_free(struct ggml_context * ctx);

    GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);

    GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
    GGML_API void   ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

    GGML_API void * ggml_get_mem_buffer     (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_mem_size       (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);

    GGML_API struct ggml_tensor * ggml_new_tensor(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int                   n_dims,
            const int64_t       * ne);

    GGML_API struct ggml_tensor * ggml_new_tensor_1d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_new_tensor_2d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1);

    GGML_API struct ggml_tensor * ggml_new_tensor_3d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_new_tensor_4d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
    GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

    GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
    GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);

    GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);

    GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

    GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

    GGML_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

    GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
    GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

    GGML_API const char *         ggml_get_name   (const struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_name   (struct ggml_tensor * tensor, const char * name);
    GGML_API struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...);

    //
    // operations on tensors with backpropagation
    //

    GGML_API struct ggml_tensor * ggml_dup(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_add(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_acc(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_acc_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_sub(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sub_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sqr(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqr_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return scalar
    GGML_API struct ggml_tensor * ggml_sum(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // sums along rows; with input shape [a,b,c,d], returns shape [1,b,c,d]
    GGML_API struct ggml_tensor * ggml_sum_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // mean along rows
    GGML_API struct ggml_tensor * ggml_mean(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // if a is the same shape as b, and a is not a parameter, return a
    // otherwise, return a new tensor: repeat(a) to fit in b
    GGML_API struct ggml_tensor * ggml_repeat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_repeat_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_abs(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_abs_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // TODO: double-check this computation is correct
    GGML_API struct ggml_tensor * ggml_gelu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_silu_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // normalize along rows
    // TODO: eps is hardcoded to 1e-5 for now
    GGML_API struct ggml_tensor * ggml_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_rms_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_rms_norm_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // A: n columns, m rows
    // B: n columns, p rows (i.e. we transpose it internally)
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_mul_mat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
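
    // example - ne[0] is the number of columns (a minimal sketch; the sizes are chosen for illustration):
    //
    //   struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 3, 4); // n = 3, m = 4
    //   struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 3, 2); // n = 3, p = 2
    //   struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);                      // ne = [4, 2] -> m columns, p rows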

    // A: m columns, n rows,
    // B: p columns, n rows,
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_out_prod(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    //
    // operations on tensors without backpropagation
    //

    GGML_API struct ggml_tensor * ggml_scale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_scale_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // b -> view(a,offset,nb1,nb2,nb3), return modified a
    GGML_API struct ggml_tensor * ggml_set(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    // b -> view(a,offset,nb1,nb2,nb3), return view(a)
    GGML_API struct ggml_tensor * ggml_set_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    // b -> view(a,offset,nb1), return modified a
    GGML_API struct ggml_tensor * ggml_set_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // b -> view(a,offset,nb1), return view(a)
    GGML_API struct ggml_tensor * ggml_set_2d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // a -> b, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // make contiguous
    GGML_API struct ggml_tensor * ggml_cont(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return view(a), b specifies the new shape
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_reshape_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_reshape_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    // offset in bytes
    GGML_API struct ggml_tensor * ggml_view_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            size_t                nb1, // row stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                nb3,
            size_t                offset);
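
    // example - a view of row 1 of a 2-D tensor a, following the stride rules described above
    // (a minimal sketch; no data is copied, the view aliases a's memory):
    //
    //   struct ggml_tensor * row1 = ggml_view_1d(ctx, a, a->ne[0], 1*a->nb[1]);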

    GGML_API struct ggml_tensor * ggml_permute(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   axis0,
            int                   axis1,
            int                   axis2,
            int                   axis3);

    // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
    GGML_API struct ggml_tensor * ggml_transpose(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_get_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_get_rows_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    GGML_API struct ggml_tensor * ggml_diag(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // set elements above the diagonal to -INF
    GGML_API struct ggml_tensor * ggml_diag_mask_inf(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // set elements above the diagonal to 0
    GGML_API struct ggml_tensor * ggml_diag_mask_zero(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    GGML_API struct ggml_tensor * ggml_soft_max(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_soft_max_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // rotary position embedding
    // if (mode & 1) - skip n_past elements
    // if (mode & 2) - GPT-NeoX style
    // if (mode & 4) - ChatGLM style
    // TODO: avoid creating a new tensor every time
    GGML_API struct ggml_tensor * ggml_rope(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);
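
    // example - GPT-NeoX style rotation of a query tensor (a sketch; q, n_past, n_rot and n_ctx
    // are assumptions standing in for model-specific values):
    //
    //   struct ggml_tensor * q_rot = ggml_rope(ctx, q, n_past, n_rot, 2, n_ctx);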

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // rotary position embedding backward, i.e. compute dx from dy
    // a - dy
    GGML_API struct ggml_tensor * ggml_rope_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode);

    // alibi position embedding
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_alibi(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_head,
            float                 bias_max);

    // clamp
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_clamp(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 min,
            float                 max);

    // TODO: implement general-purpose convolutions
    // GGML_API struct ggml_tensor * ggml_conv_1d(
    //        struct ggml_context * ctx,
    //        struct ggml_tensor  * a,
    //        struct ggml_tensor  * b,
    //        int                   s0,
    //        int                   p0,
    //        int                   d0);
    //
    // GGML_API struct ggml_tensor * ggml_conv_2d(
    //        struct ggml_context * ctx,
    //        struct ggml_tensor  * a,
    //        struct ggml_tensor  * b,
    //        int                   s0,
    //        int                   s1,
    //        int                   p0,
    //        int                   p1,
    //        int                   d0,
    //        int                   d1);

    // padding = half
    // TODO: we don't support extra parameters for now
    //       that's why we are hard-coding the stride, padding, and dilation
    //       not great ..
    // example:
    //   a:      3   80  768    1
    //   b:   3000   80    1    1
    //   res: 3000  768    1    1
    // used in whisper
    GGML_API struct ggml_tensor * ggml_conv_1d_s1_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // used in whisper
    GGML_API struct ggml_tensor * ggml_conv_1d_s2_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // kernel size is a->ne[0] x a->ne[1]
    // stride is equal to kernel size
    // padding is zero
    // example:
    //   a:     16   16    3  768
    //   b:   1024 1024    3    1
    //   res:   64   64  768    1
    // used in sam
    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_flash_attn(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_attn_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * d,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_ff(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b0,
            struct ggml_tensor  * b1,
            struct ggml_tensor  * c0,
            struct ggml_tensor  * c1);

    // partition into non-overlapping windows with padding if needed
    // example:
    //   a:   768   64   64    1
    //   w:    14
    //   res: 768   14   14   25
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_part(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w);

    // reverse of ggml_win_part
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_unpart(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w0,
            int                   h0,
            int                   w);

    // custom operators

    typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
    typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

    typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);

    GGML_API struct ggml_tensor * ggml_map_unary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun);
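
    // example - an element-wise custom op (a minimal sketch; my_relu is a made-up helper, and the
    // arguments of a ggml_unary_op_f32_t are (n, dst, src)):
    //
    //   static void my_relu(const int n, float * dst, const float * src) {
    //       for (int i = 0; i < n; i++) {
    //           dst[i] = src[i] > 0.0f ? src[i] : 0.0f;
    //       }
    //   }
    //
    //   struct ggml_tensor * y = ggml_map_unary_f32(ctx, x, my_relu);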

    GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun);

    GGML_API struct ggml_tensor * ggml_map_binary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun);

    GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun);

    GGML_API struct ggml_tensor * ggml_map_custom1_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom2_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom3_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_f32_t fun);

    GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_f32_t fun);

    // loss function

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    //
    // automatic differentiation
    //

    GGML_API void ggml_set_param(
            struct ggml_context * ctx,
            struct ggml_tensor  * tensor);

    GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);

    GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
    GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);

    GGML_API void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
    GGML_API void ggml_graph_reset  (struct ggml_cgraph * cgraph);

    GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);

    GGML_API void               ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
    GGML_API struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);
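
    // example - a forward and backward pass over the f(x) = a*x^2 + b graph from the header comment
    // (a sketch; assumes f and x were created as shown there and x was marked with ggml_set_param):
    //
    //   struct ggml_cgraph gf = ggml_build_forward(f);
    //   struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
    //
    //   ggml_graph_reset(&gf);
    //   ggml_set_f32(f->grad, 1.0f); // seed the output adjoint: df/df = 1
    //   ggml_graph_compute(ctx, &gb);
    //
    //   printf("df/dx = %f\n", ggml_get_f32_1d(x->grad, 0));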

    // print info and performance information for the graph
    GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

    // dump the graph into a file using the dot format
    GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);

    //
    // optimization
    //

    // optimization methods
    enum ggml_opt_type {
        GGML_OPT_ADAM,
        GGML_OPT_LBFGS,
    };

    // linesearch methods
    enum ggml_linesearch {
        GGML_LINESEARCH_DEFAULT = 1,

        GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
        GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
        GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
    };

    // optimization return values
    enum ggml_opt_result {
        GGML_OPT_OK = 0,
        GGML_OPT_DID_NOT_CONVERGE,
        GGML_OPT_NO_CONTEXT,
        GGML_OPT_INVALID_WOLFE,
        GGML_OPT_FAIL,

        GGML_LINESEARCH_FAIL = -128,
        GGML_LINESEARCH_MINIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_ITERATIONS,
        GGML_LINESEARCH_INVALID_PARAMETERS,
    };

    // optimization parameters
    //
    //   see ggml.c (ggml_opt_default_params) for default values
    //
    struct ggml_opt_params {
        enum ggml_opt_type type;

        int n_threads;

        // delta-based convergence test
        //
        //   if past == 0 - disabled
        //   if past > 0:
        //     stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
        //
        int past;
        float delta;

        // maximum number of iterations without improvement
        //
        //   if 0 - disabled
        //   if > 0:
        //     assume convergence if no cost improvement in this number of iterations
        //
        int max_no_improvement;

        bool print_forward_graph;
        bool print_backward_graph;

        // ADAM parameters
        struct {
            int n_iter;

            float sched; // schedule multiplier (fixed, decay or warmup)
            float decay; // weight decay for AdamW, use 0.0f to disable
            float alpha; // learning rate
            float beta1;
            float beta2;
            float eps;   // epsilon for numerical stability
            float eps_f; // epsilon for convergence test
            float eps_g; // epsilon for convergence test
        } adam;

        // LBFGS parameters
        struct {
            int m; // number of corrections to approximate the inv. Hessian
            int n_iter;
            int max_linesearch;

            float eps;  // convergence tolerance
            float ftol; // line search tolerance
            float wolfe;
            float min_step;
            float max_step;

            enum ggml_linesearch linesearch;
        } lbfgs;
    };

    struct ggml_opt_context {
        struct ggml_context * ctx;
        struct ggml_opt_params params;

        int iter;
        int64_t nx; // number of parameter elements

        bool just_initialized;

        struct {
            struct ggml_tensor * x;  // view of the parameters
            struct ggml_tensor * g1; // gradient
            struct ggml_tensor * g2; // gradient squared
            struct ggml_tensor * m;  // first moment
            struct ggml_tensor * v;  // second moment
            struct ggml_tensor * mh; // first moment hat
            struct ggml_tensor * vh; // second moment hat
            struct ggml_tensor * pf; // past function values
            float fx_best;
            float fx_prev;
            int n_no_improvement;
        } adam;

        struct {
            struct ggml_tensor * x;    // current parameters
            struct ggml_tensor * xp;   // previous parameters
            struct ggml_tensor * g;    // current gradient
            struct ggml_tensor * gp;   // previous gradient
            struct ggml_tensor * d;    // search direction
            struct ggml_tensor * pf;   // past function values
            struct ggml_tensor * lmal; // the L-BFGS memory alpha
            struct ggml_tensor * lmys; // the L-BFGS memory ys
            struct ggml_tensor * lms;  // the L-BFGS memory s
            struct ggml_tensor * lmy;  // the L-BFGS memory y
            float fx_best;
            float step;
            int j;
            int k;
            int end;
            int n_no_improvement;
        } lbfgs;
    };

    GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

    // optimize the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt(
            struct ggml_context * ctx,
            struct ggml_opt_params params,
            struct ggml_tensor * f);
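
    // example - minimize a scalar loss tensor f with the default ADAM settings (a minimal sketch;
    // f is assumed to be built from tensors marked with ggml_set_param):
    //
    //   struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
    //
    //   enum ggml_opt_result res = ggml_opt(ctx, opt_params, f);
    //   GGML_ASSERT(res == GGML_OPT_OK);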

    // initialize optimizer context
    GGML_API void ggml_opt_init(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_opt_params params,
            int64_t nx);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume_g(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f,
            struct ggml_cgraph * gf,
            struct ggml_cgraph * gb);

    //
    // quantization
    //

    GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);

    GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
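
    // example - quantize n floats to Q4_0, collecting a histogram of the quantized values and
    // returning the size of the result in bytes (a sketch; src, dst and n are assumptions, and
    // n should be a multiple of the quantization block size):
    //
    //   int64_t hist[16] = {0};
    //   size_t  bytes = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, 0, n, hist);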

    //
    // system info
    //

    GGML_API int ggml_cpu_has_avx        (void);
    GGML_API int ggml_cpu_has_avx2       (void);
    GGML_API int ggml_cpu_has_avx512     (void);
    GGML_API int ggml_cpu_has_avx512_vbmi(void);
    GGML_API int ggml_cpu_has_avx512_vnni(void);
    GGML_API int ggml_cpu_has_fma        (void);
    GGML_API int ggml_cpu_has_neon       (void);
    GGML_API int ggml_cpu_has_arm_fma    (void);
    GGML_API int ggml_cpu_has_f16c       (void);
    GGML_API int ggml_cpu_has_fp16_va    (void);
    GGML_API int ggml_cpu_has_wasm_simd  (void);
    GGML_API int ggml_cpu_has_blas       (void);
    GGML_API int ggml_cpu_has_cublas     (void);
    GGML_API int ggml_cpu_has_clblast    (void);
    GGML_API int ggml_cpu_has_gpublas    (void);
    GGML_API int ggml_cpu_has_sse3       (void);
    GGML_API int ggml_cpu_has_vsx       (void);

    //
    // Internal types and functions exposed for tests and benchmarks
    //

#ifdef  __cplusplus
    // restrict is not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif

    typedef void (*dequantize_row_q_t)(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
    typedef void (*quantize_row_q_t)  (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k);
    typedef void (*vec_dot_q_t)       (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

    typedef struct {
        dequantize_row_q_t dequantize_row_q;
        quantize_row_q_t   quantize_row_q;
        quantize_row_q_t   quantize_row_q_reference;
        quantize_row_q_t   quantize_row_q_dot;
        vec_dot_q_t        vec_dot_q;
        enum ggml_type     vec_dot_type;
    } quantize_fns_t;

    quantize_fns_t ggml_internal_get_quantize_fn(size_t i);

#ifdef  __cplusplus
}
#endif