#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you would like some specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph gf = ggml_build_forward(f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute(ctx, &gf);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you will need for your computation. Alternatively, you can allocate a large enough memory
// buffer and, after defining the computation graph, call the ggml_used_mem() function to find out how much memory was
// actually needed.
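//
// For example, a sketch of this measuring pattern (the 128 MB upper bound here is
// just an illustrative guess, not a recommended value):
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 128*1024*1024, // generous upper bound
//           .mem_buffer = NULL,
//       };
//
//       struct ggml_context * ctx = ggml_init(params);
//
//       // ... define the tensors and the computation graph ...
//
//       printf("memory actually needed: %zu bytes\n", ggml_used_mem(ctx));
//
//       ggml_free(ctx);
//   }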
//
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach allows defining the function graph once and then computing its forward or backward graphs
// multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way
// the user can avoid the memory allocation overhead at runtime.
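//
// Continuing the example above, a sketch of how the gradient df/dx could be computed
// (f, x, a, b as defined earlier; seeding f->grad with 1 starts the backward pass):
//
//   {
//       struct ggml_cgraph gf = ggml_build_forward(f);
//       struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
//
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_reset(&gf);       // zero all gradients
//       ggml_set_f32(f->grad, 1.0f); // seed df/df = 1
//
//       ggml_graph_compute(ctx, &gb);
//
//       printf("df/dx = %f\n", ggml_get_f32_1d(x->grad, 0)); // 2*a*x = 12 at x = 2
//   }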
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated in the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_1s()
//  - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src0 == a);
//       assert(c->src1 == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This allows
// storing tensors that are not contiguous in memory, which is useful for operations such as transposition and
// permutation. All tensor operations have to take the stride into account and must not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3);
//
//       // a[1, 2] = 1.0f;
//       *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f;
//
//       // a[2, 0] = 2.0f;
//       *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f;
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d(), that can be used.
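// These index into the tensor data as a flat array (assuming a contiguous layout).
// For example, the element a[1, 2] written above has flat index 2*ne[0] + 1 = 5
// for this 2x3 tensor:
//
//   {
//       ggml_set_f32_1d(a, 5, 1.0f);           // same element as a[1, 2] above
//
//       const float v = ggml_get_f32_1d(a, 5); // v == 1.0f
//   }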
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>  // fprintf, used by GGML_ASSERT
#include <stdlib.h> // abort, used by GGML_ASSERT

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_QNT_VERSION        2    // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this

#define GGML_MAX_DIMS          4
#define GGML_MAX_NODES         4096
#define GGML_MAX_PARAMS        256
#define GGML_MAX_CONTEXTS      64
#define GGML_MAX_OPT           4
#define GGML_MAX_NAME          32
#define GGML_DEFAULT_N_THREADS 4

#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __ARM_NEON
    // we use the built-in 16-bit float type
    typedef __fp16 ggml_fp16_t;
#else
    typedef uint16_t ggml_fp16_t;
#endif

    // convert FP16 <-> FP32
    GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
    GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, size_t n);
    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, size_t n);

    struct ggml_object;
    struct ggml_context;

    enum ggml_type {
        GGML_TYPE_F32  = 0,
        GGML_TYPE_F16  = 1,
        GGML_TYPE_Q4_0 = 2,
        GGML_TYPE_Q4_1 = 3,
        // GGML_TYPE_Q4_2 = 4, support has been removed
        // GGML_TYPE_Q4_3 = 5, support has been removed
        GGML_TYPE_Q5_0 = 6,
        GGML_TYPE_Q5_1 = 7,
        GGML_TYPE_Q8_0 = 8,
        GGML_TYPE_Q8_1 = 9,
        // k-quantizations
        GGML_TYPE_Q2_K = 10,
        GGML_TYPE_Q3_K = 11,
        GGML_TYPE_Q4_K = 12,
        GGML_TYPE_Q5_K = 13,
        GGML_TYPE_Q6_K = 14,
        GGML_TYPE_Q8_K = 15,
        GGML_TYPE_I8,
        GGML_TYPE_I16,
        GGML_TYPE_I32,
        GGML_TYPE_COUNT,
    };

    enum ggml_backend {
        GGML_BACKEND_CPU       = 0,
        GGML_BACKEND_GPU       = 10,
        GGML_BACKEND_GPU_SPLIT = 20,
    };

    // model file types
    enum ggml_ftype {
        GGML_FTYPE_UNKNOWN              = -1,
        GGML_FTYPE_ALL_F32              = 0,
        GGML_FTYPE_MOSTLY_F16           = 1, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_0          = 2, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1          = 3, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
        GGML_FTYPE_MOSTLY_Q8_0          = 7, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_0          = 8, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_1          = 9, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q3_K          = 11, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_K          = 12, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_K          = 13, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q6_K          = 14, // except 1d tensors
    };

    // available tensor operations:
    enum ggml_op {
        GGML_OP_NONE = 0,

        GGML_OP_DUP,
        GGML_OP_ADD,
        GGML_OP_ADD1,
        GGML_OP_ACC,
        GGML_OP_SUB,
        GGML_OP_MUL,
        GGML_OP_DIV,
        GGML_OP_SQR,
        GGML_OP_SQRT,
        GGML_OP_LOG,
        GGML_OP_SUM,
        GGML_OP_SUM_ROWS,
        GGML_OP_MEAN,
        GGML_OP_REPEAT,
        GGML_OP_REPEAT_BACK,
        GGML_OP_ABS,
        GGML_OP_SGN,
        GGML_OP_NEG,
        GGML_OP_STEP,
        GGML_OP_RELU,
        GGML_OP_GELU,
        GGML_OP_GELU_QUICK,
        GGML_OP_SILU,
        GGML_OP_SILU_BACK,
        GGML_OP_NORM, // normalize
        GGML_OP_RMS_NORM,
        GGML_OP_RMS_NORM_BACK,

        GGML_OP_MUL_MAT,
        GGML_OP_OUT_PROD,

        GGML_OP_SCALE,
        GGML_OP_SET,
        GGML_OP_CPY,
        GGML_OP_CONT,
        GGML_OP_RESHAPE,
        GGML_OP_VIEW,
        GGML_OP_PERMUTE,
        GGML_OP_TRANSPOSE,
        GGML_OP_GET_ROWS,
        GGML_OP_GET_ROWS_BACK,
        GGML_OP_DIAG,
        GGML_OP_DIAG_MASK_INF,
        GGML_OP_DIAG_MASK_ZERO,
        GGML_OP_SOFT_MAX,
        GGML_OP_SOFT_MAX_BACK,
        GGML_OP_ROPE,
        GGML_OP_ROPE_BACK,
        GGML_OP_ALIBI,
        GGML_OP_CLAMP,
        GGML_OP_CONV_1D_S1_PH,
        GGML_OP_CONV_1D_S2_PH,
        GGML_OP_CONV_2D_SK_P0,

        GGML_OP_FLASH_ATTN,
        GGML_OP_FLASH_FF,
        GGML_OP_FLASH_ATTN_BACK,
        GGML_OP_WIN_PART,
        GGML_OP_WIN_UNPART,

        GGML_OP_MAP_UNARY,
        GGML_OP_MAP_BINARY,

        GGML_OP_CROSS_ENTROPY_LOSS,
        GGML_OP_CROSS_ENTROPY_LOSS_BACK,

        GGML_OP_COUNT,
    };

    // ggml object
    struct ggml_object {
        size_t offs;
        size_t size;

        struct ggml_object * next;

        char padding[8];
    };

    static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

    // n-dimensional tensor
    struct ggml_tensor {
        enum ggml_type    type;
        enum ggml_backend backend;

        int     n_dims;
        int64_t ne[GGML_MAX_DIMS]; // number of elements
        size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                                   // nb[0] = sizeof(type)
                                   // nb[1] = nb[0]   * ne[0] + padding
                                   // nb[i] = nb[i-1] * ne[i-1]
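                                   //
                                   // for example, a contiguous F32 tensor with
                                   // ne = {2, 3, 1, 1} has nb = {4, 8, 24, 24}
                                   // (no row padding for F32)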

        // compute data
        enum ggml_op op;

        bool is_param;

        struct ggml_tensor * grad;
        struct ggml_tensor * src0;
        struct ggml_tensor * src1;
        struct ggml_tensor * opt[GGML_MAX_OPT];

        // thread scheduling
        int n_tasks;

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;

        void * data;

        char name[GGML_MAX_NAME];

        void * extra; // extra things e.g. for ggml-cuda.cu

        char padding[4];
    };

    static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);

    // computation graph
    struct ggml_cgraph {
        int n_nodes;
        int n_leafs;
        int n_threads;

        size_t work_size;
        struct ggml_tensor * work;

        struct ggml_tensor * nodes[GGML_MAX_NODES];
        struct ggml_tensor * grads[GGML_MAX_NODES];
        struct ggml_tensor * leafs[GGML_MAX_NODES];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;
    };

    // scratch buffer
    struct ggml_scratch {
        size_t offs;
        size_t size;
        void * data;
    };

    struct ggml_init_params {
        // memory pool
        size_t mem_size;   // bytes
        void * mem_buffer; // if NULL, memory will be allocated internally
        bool   no_alloc;   // don't allocate memory for the tensor data
    };

    // compute types
    enum ggml_task_type {
        GGML_TASK_INIT = 0,
        GGML_TASK_COMPUTE,
        GGML_TASK_FINALIZE,
    };

    struct ggml_compute_params {
        enum ggml_task_type type;

        // ith = thread index, nth = number of threads
        int ith, nth;

        // work buffer for all threads
        size_t wsize;
        void * wdata;
    };

    // misc

    GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
    GGML_API int64_t ggml_time_ms(void);
    GGML_API int64_t ggml_time_us(void);
    GGML_API int64_t ggml_cycles(void);
    GGML_API int64_t ggml_cycles_per_ms(void);

    GGML_API void    ggml_print_object (const struct ggml_object * obj);
    GGML_API void    ggml_print_objects(const struct ggml_context * ctx);

    GGML_API int64_t ggml_nelements   (const struct ggml_tensor * tensor);
    GGML_API int64_t ggml_nrows       (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes      (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split);

    GGML_API int     ggml_blck_size (enum ggml_type type);
    GGML_API size_t  ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
    GGML_API float   ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float

    GGML_API const char * ggml_type_name(enum ggml_type type);
    GGML_API const char * ggml_op_name  (enum ggml_op   op);

    GGML_API size_t  ggml_element_size(const struct ggml_tensor * tensor);

    GGML_API bool    ggml_is_quantized(enum ggml_type type);

    // TODO: temporary until model loading of ggml examples is refactored
    GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

    GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);
    GGML_API bool ggml_is_permuted  (const struct ggml_tensor * tensor);

    // use this to compute the memory overhead of a tensor
    GGML_API size_t ggml_tensor_overhead(void);
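
    // example (a hedged sketch): sizing a context for n_tensors F32 tensors of
    // n_elem elements each (n_tensors and n_elem are illustrative names):
    //
    //   size_t mem_size = 0;
    //   mem_size += n_tensors*ggml_tensor_overhead();               // tensor metadata
    //   mem_size += n_tensors*n_elem*ggml_type_size(GGML_TYPE_F32); // tensor data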

    // main

    GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
    GGML_API void                  ggml_free(struct ggml_context * ctx);

    GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);

    GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
    GGML_API void   ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

    GGML_API void * ggml_get_mem_buffer     (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_mem_size       (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);

    GGML_API struct ggml_tensor * ggml_new_tensor(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int                   n_dims,
            const int64_t       * ne);

    GGML_API struct ggml_tensor * ggml_new_tensor_1d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_new_tensor_2d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1);

    GGML_API struct ggml_tensor * ggml_new_tensor_3d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_new_tensor_4d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
    GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float   value);

    GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
    GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);

    GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);

    GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float   value);

    GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

    GGML_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

    GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
    GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

    GGML_API const char *         ggml_get_name(const struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name);

    //
    // operations on tensors with backpropagation
    //

    GGML_API struct ggml_tensor * ggml_dup(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_add(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_acc(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_acc_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_sub(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sub_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sqr(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqr_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return scalar
    GGML_API struct ggml_tensor * ggml_sum(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // sums along rows, with input shape [a,b,c,d] return shape [1,b,c,d]
    GGML_API struct ggml_tensor * ggml_sum_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // mean along rows
    GGML_API struct ggml_tensor * ggml_mean(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // if a is the same shape as b, and a is not a parameter, return a
    // otherwise, return a new tensor: repeat(a) to fit in b
    GGML_API struct ggml_tensor * ggml_repeat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_repeat_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_abs(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_abs_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // TODO: double-check this computation is correct
    GGML_API struct ggml_tensor * ggml_gelu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_silu_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // normalize along rows
    // TODO: eps is hardcoded to 1e-5 for now
    GGML_API struct ggml_tensor * ggml_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_rms_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_rms_norm_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // A: n columns, m rows
    // B: n columns, p rows (i.e. we transpose it internally)
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_mul_mat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
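
    // example (illustrative, shapes listed in ggml's [ne0, ne1] order):
    //
    //   a: ne = {2, 3, 1, 1}   // n = 2 columns, m = 3 rows
    //   b: ne = {2, 4, 1, 1}   // n = 2 columns, p = 4 rows
    //
    //   struct ggml_tensor * r = ggml_mul_mat(ctx, a, b); // r->ne = {3, 4, 1, 1}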

    // A: m columns, n rows,
    // B: p columns, n rows,
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_out_prod(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    //
    // operations on tensors without backpropagation
    //

    GGML_API struct ggml_tensor * ggml_scale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_scale_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // b -> view(a, offset, nb1, nb2, nb3), return modified a
    GGML_API struct ggml_tensor * ggml_set(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    // b -> view(a, offset, nb1, nb2, nb3), return view(a)
    GGML_API struct ggml_tensor * ggml_set_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    // b -> view(a, offset, nb1), return modified a
    GGML_API struct ggml_tensor * ggml_set_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // b -> view(a, offset, nb1), return view(a)
    GGML_API struct ggml_tensor * ggml_set_2d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // a -> b, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // make contiguous
    GGML_API struct ggml_tensor * ggml_cont(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // return view(a), b specifies the new shape
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_reshape_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_reshape_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    // offset in bytes
    GGML_API struct ggml_tensor * ggml_view_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            size_t                offset);
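
    // example (illustrative): a 1d view of row i1 of a 2d tensor a,
    // using the byte strides described in the overview above:
    //
    //   struct ggml_tensor * row = ggml_view_1d(ctx, a, a->ne[0], i1*a->nb[1]);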

    GGML_API struct ggml_tensor * ggml_view_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            size_t                nb1, // row stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_permute(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   axis0,
            int                   axis1,
            int                   axis2,
            int                   axis3);

    // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
    GGML_API struct ggml_tensor * ggml_transpose(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_get_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_get_rows_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    GGML_API struct ggml_tensor * ggml_diag(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // set elements above the diagonal to -INF
    GGML_API struct ggml_tensor * ggml_diag_mask_inf(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // set elements above the diagonal to 0
    GGML_API struct ggml_tensor * ggml_diag_mask_zero(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    GGML_API struct ggml_tensor * ggml_soft_max(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_soft_max_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // rotary position embedding
    // if mode & 1 == 1, skip n_past elements
    // if (mode & 2) != 0, GPT-NeoX style
    // TODO: avoid creating a new tensor every time
    GGML_API struct ggml_tensor * ggml_rope(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode);

    // rotary position embedding backward, i.e. compute dx from dy
    // a - dy
    GGML_API struct ggml_tensor * ggml_rope_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_dims,
            int                   mode);

    // alibi position embedding
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_alibi(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past,
            int                   n_head,
            float                 bias_max);

    // clamp
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_clamp(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 min,
            float                 max);

    // TODO: implement general-purpose convolutions
    // GGML_API struct ggml_tensor * ggml_conv_1d(
    //         struct ggml_context * ctx,
    //         struct ggml_tensor  * a,
    //         struct ggml_tensor  * b,
    //         int                   s0,  // stride
    //         int                   p0,  // padding
    //         int                   d0); // dilation
    //
    // GGML_API struct ggml_tensor * ggml_conv_2d(
    //         struct ggml_context * ctx,
    //         struct ggml_tensor  * a,
    //         struct ggml_tensor  * b,
    //         int                   s0,
    //         int                   s1,
    //         int                   p0,
    //         int                   p1,
    //         int                   d0,
    //         int                   d1);

    // stride = 1, padding = half
    // TODO: we don't support extra parameters for now
    //       that's why we are hard-coding the stride, padding, and dilation
    //       not great ..
    // example:
    //   a:   3    80 768 1
    //   b:   3000 80   1 1
    //   res: 3000 768  1 1
    // used in whisper
    GGML_API struct ggml_tensor * ggml_conv_1d_s1_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // same as above, but with stride 2
    // used in whisper
    GGML_API struct ggml_tensor * ggml_conv_1d_s2_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // kernel size is a->ne[0] x a->ne[1]
    // stride is equal to kernel size
    // padding is zero
    // example:
    //   a:     16   16   3 768
    //   b:   1024 1024   3   1
    //   res:   64   64 768   1
    // used in sam
    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_flash_attn(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_attn_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * d,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_ff(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b0,
            struct ggml_tensor  * b1,
            struct ggml_tensor  * c0,
            struct ggml_tensor  * c1);

    // partition into non-overlapping windows with padding if needed
    // example:
    //   a:   768 64 64 1
    //   w:   14
    //   res: 768 14 14 25
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_part(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w);

    // reverse of ggml_win_part
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_unpart(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w0,
            int                   h0,
            int                   w);

    // mapping operations
    typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
    typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

    GGML_API struct ggml_tensor * ggml_map_unary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun);

    GGML_API struct ggml_tensor * ggml_map_binary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun);
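
    // example (a minimal sketch; my_relu_f32 is an illustrative name, not part
    // of the library; the function signature follows ggml_unary_op_f32_t above,
    // i.e. (n, dst, src)):
    //
    //   static void my_relu_f32(const int n, float * dst, const float * src) {
    //       for (int i = 0; i < n; ++i) {
    //           dst[i] = src[i] > 0.0f ? src[i] : 0.0f;
    //       }
    //   }
    //
    //   struct ggml_tensor * y = ggml_map_unary_f32(ctx, x, my_relu_f32);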

    // loss function

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    //
    // automatic differentiation
    //

    GGML_API void ggml_set_param(
            struct ggml_context * ctx,
            struct ggml_tensor  * tensor);

    GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);

    GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
    GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);

    GGML_API void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
    GGML_API void ggml_graph_reset  (struct ggml_cgraph * cgraph);

    GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);

    GGML_API void               ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
    GGML_API struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);

    // print info and performance information for the graph
    GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

    // dump the graph into a file using the dot format
    GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);

    //
    // optimization
    //

    // optimization methods
    enum ggml_opt_type {
        GGML_OPT_ADAM,
        GGML_OPT_LBFGS,
    };

    // linesearch methods
    enum ggml_linesearch {
        GGML_LINESEARCH_DEFAULT = 1,

        GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
        GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
        GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
    };

    // optimization return values
    enum ggml_opt_result {
        GGML_OPT_OK = 0,
        GGML_OPT_DID_NOT_CONVERGE,
        GGML_OPT_NO_CONTEXT,
        GGML_OPT_INVALID_WOLFE,
        GGML_OPT_FAIL,

        GGML_LINESEARCH_FAIL = -128,
        GGML_LINESEARCH_MINIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_ITERATIONS,
        GGML_LINESEARCH_INVALID_PARAMETERS,
    };

    // optimization parameters
    //
    //   see ggml.c (ggml_opt_default_params) for default values
    //
    struct ggml_opt_params {
        enum ggml_opt_type type;

        int n_threads;

        // delta-based convergence test
        //
        //   if past == 0 - disabled
        //   if past > 0:
        //     stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
        //
        int   past;
        float delta;

        // maximum number of iterations without improvement
        //
        //   if 0 - disabled
        //   if > 0:
        //     assume convergence if no cost improvement in this number of iterations
        //
        int max_no_improvement;

        bool print_forward_graph;
        bool print_backward_graph;

        // ADAM parameters
        struct {
            int n_iter;

            float sched; // schedule multiplier (fixed, decay or warmup)
            float decay; // weight decay for AdamW, use 0.0f to disable
            float alpha; // learning rate
            float beta1;
            float beta2;
            float eps;   // epsilon for numerical stability
            float eps_f; // epsilon for convergence test
            float eps_g; // epsilon for convergence test
        } adam;

        // LBFGS parameters
        struct {
            int m; // number of corrections to approximate the inv. Hessian
            int n_iter;
            int max_linesearch;

            float eps;      // convergence tolerance
            float ftol;     // line search tolerance
            float wolfe;
            float min_step;
            float max_step;

            enum ggml_linesearch linesearch;
        } lbfgs;
    };

    struct ggml_opt_context {
        struct ggml_context * ctx;
        struct ggml_opt_params params;

        int iter;
        int64_t nx; // number of parameter elements

        bool just_initialized;

        struct {
            struct ggml_tensor * x;  // view of the parameters
            struct ggml_tensor * g1; // gradient
            struct ggml_tensor * g2; // gradient squared
            struct ggml_tensor * m;  // first moment
            struct ggml_tensor * v;  // second moment
            struct ggml_tensor * mh; // first moment hat
            struct ggml_tensor * vh; // second moment hat
            struct ggml_tensor * pf; // past function values
            float fx_best;
            float fx_prev;
            int n_no_improvement;
        } adam;

        struct {
            struct ggml_tensor * x;    // current parameters
            struct ggml_tensor * xp;   // previous parameters
            struct ggml_tensor * g;    // current gradient
            struct ggml_tensor * gp;   // previous gradient
            struct ggml_tensor * d;    // search direction
            struct ggml_tensor * pf;   // past function values
            struct ggml_tensor * lmal; // the L-BFGS memory alpha
            struct ggml_tensor * lmys; // the L-BFGS memory ys
            struct ggml_tensor * lms;  // the L-BFGS memory s
            struct ggml_tensor * lmy;  // the L-BFGS memory y
            float fx_best;
            float step;
            int j;
            int k;
            int end;
            int n_no_improvement;
        } lbfgs;
    };

    GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

    // optimize the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt(
            struct ggml_context * ctx,
            struct ggml_opt_params params,
            struct ggml_tensor * f);
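
    // example (a hedged sketch): minimize a scalar loss f whose parameters were
    // marked with ggml_set_param():
    //
    //   struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
    //
    //   enum ggml_opt_result res = ggml_opt(ctx, opt_params, f);
    //   GGML_ASSERT(res == GGML_OPT_OK);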

    // initialize optimizer context
    GGML_API void ggml_opt_init(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_opt_params params,
            int64_t nx);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume_g(
            struct ggml_context * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor * f,
            struct ggml_cgraph * gf,
            struct ggml_cgraph * gb);

    //
    // quantization
    //

    GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
    GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);

    GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
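
    // example (a hedged sketch; src_f32, dst, nelements and ne0 are illustrative
    // names; the 16-entry histogram of quantized values follows the usage in the
    // ggml examples):
    //
    //   int64_t hist[16] = {0};
    //   size_t  size = ggml_quantize_q4_0(src_f32, dst, nelements, ne0, hist);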

    //
    // system info
    //

    GGML_API int ggml_cpu_has_avx        (void);
    GGML_API int ggml_cpu_has_avx2       (void);
    GGML_API int ggml_cpu_has_avx512     (void);
    GGML_API int ggml_cpu_has_avx512_vbmi(void);
    GGML_API int ggml_cpu_has_avx512_vnni(void);
    GGML_API int ggml_cpu_has_fma        (void);
    GGML_API int ggml_cpu_has_neon       (void);
    GGML_API int ggml_cpu_has_arm_fma    (void);
    GGML_API int ggml_cpu_has_f16c       (void);
    GGML_API int ggml_cpu_has_fp16_va    (void);
    GGML_API int ggml_cpu_has_wasm_simd  (void);
    GGML_API int ggml_cpu_has_blas       (void);
    GGML_API int ggml_cpu_has_cublas     (void);
    GGML_API int ggml_cpu_has_clblast    (void);
    GGML_API int ggml_cpu_has_gpublas    (void);
    GGML_API int ggml_cpu_has_sse3       (void);
    GGML_API int ggml_cpu_has_vsx        (void);

    //
    // Internal types and functions exposed for tests and benchmarks
    //

#ifdef __cplusplus
    // restrict not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif
    typedef void (*dequantize_row_q_t)(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
    typedef void (*quantize_row_q_t)  (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k);
    typedef void (*vec_dot_q_t)       (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

    typedef struct {
        dequantize_row_q_t dequantize_row_q;
        quantize_row_q_t   quantize_row_q;
        quantize_row_q_t   quantize_row_q_reference;
        quantize_row_q_t   quantize_row_q_dot;
        vec_dot_q_t        vec_dot_q;
        enum ggml_type     vec_dot_type;
    } quantize_fns_t;

    quantize_fns_t ggml_internal_get_quantize_fn(size_t i);

#ifdef __cplusplus
}
#endif