#ifndef LLAMA_H
#define LLAMA_H

#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES
#else
#define LLAMA_MAX_DEVICES 1
#endif // GGML_USE_CUBLAS

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define LLAMA_API __declspec(dllexport)
#        else
#            define LLAMA_API __declspec(dllimport)
#        endif
#    else
#        define LLAMA_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define LLAMA_API
#endif

#ifdef __GNUC__
#    define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define DEPRECATED(func, hint) func
#endif

#define LLAMA_DEFAULT_SEED 0xFFFFFFFF

#define LLAMA_MAX_RNG_STATE (64*1024)

#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'

#define LLAMA_SESSION_MAGIC   LLAMA_FILE_MAGIC_GGSN
#define LLAMA_SESSION_VERSION 3

#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
#define LLAMA_SUPPORTS_GPU_OFFLOAD
#endif

#ifdef __cplusplus
extern "C" {
#endif

    //
    // C interface
    //
    // TODO: show sample usage
    //
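    // A minimal usage sketch (illustrative only - error handling, proper sampling and
    // cleanup on failure are omitted; "model.gguf" and the prompt are placeholders):
    //
    //     #include "llama.h"
    //     #include <stdio.h>
    //     #include <string.h>
    //
    //     int main(void) {
    //         llama_backend_init(false);
    //
    //         struct llama_model_params mparams = llama_model_default_params();
    //         struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    //
    //         struct llama_context_params cparams = llama_context_default_params();
    //         struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    //
    //         // tokenize the prompt
    //         const char * prompt = "Hello";
    //         llama_token tokens[64];
    //         const int32_t n_tokens = llama_tokenize(model, prompt, (int32_t) strlen(prompt),
    //                                                 tokens, 64, true, false);
    //
    //         // evaluate the prompt as one batch (sequence 0, starting at position 0)
    //         llama_decode(ctx, llama_batch_get_one(tokens, n_tokens, 0, 0));
    //
    //         // with the default context params only the logits of the last token in the
    //         // batch are computed here - pick the most likely next token greedily
    //         const float * logits = llama_get_logits(ctx);
    //         const int32_t n_vocab = llama_n_vocab(model);
    //         llama_token best = 0;
    //         for (llama_token id = 1; id < n_vocab; ++id) {
    //             if (logits[id] > logits[best]) best = id;
    //         }
    //
    //         char piece[64];
    //         const int32_t n = llama_token_to_piece(model, best, piece, sizeof(piece));
    //         printf("next token: %.*s\n", n, piece);
    //
    //         llama_free(ctx);
    //         llama_free_model(model);
    //         llama_backend_free();
    //         return 0;
    //     }
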
    struct llama_model;
    struct llama_context;

    typedef int32_t llama_pos;
    typedef int32_t llama_token;
    typedef int32_t llama_seq_id;

    enum llama_vocab_type {
        LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
        LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding
    };

    enum llama_token_type {
        LLAMA_TOKEN_TYPE_UNDEFINED    = 0,
        LLAMA_TOKEN_TYPE_NORMAL       = 1,
        LLAMA_TOKEN_TYPE_UNKNOWN      = 2,
        LLAMA_TOKEN_TYPE_CONTROL      = 3,
        LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
        LLAMA_TOKEN_TYPE_UNUSED       = 5,
        LLAMA_TOKEN_TYPE_BYTE         = 6,
    };

    // model file types
    enum llama_ftype {
        LLAMA_FTYPE_ALL_F32              = 0,
        LLAMA_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
        // LLAMA_FTYPE_MOSTLY_Q4_2       = 5,  // support has been removed
        // LLAMA_FTYPE_MOSTLY_Q4_3       = 6,  // support has been removed
        LLAMA_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_S        = 11, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_M        = 12, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_L        = 13, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_K_S        = 14, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_K_M        = 15, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_K_S        = 16, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_K_M        = 17, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q6_K          = 18, // except 1d tensors

        LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
    };

    enum llama_rope_scaling_type {
        LLAMA_ROPE_SCALING_UNSPECIFIED = -1,
        LLAMA_ROPE_SCALING_NONE        = 0,
        LLAMA_ROPE_SCALING_LINEAR      = 1,
        LLAMA_ROPE_SCALING_YARN        = 2,
        LLAMA_ROPE_SCALING_MAX_VALUE   = LLAMA_ROPE_SCALING_YARN,
    };

    typedef struct llama_token_data {
        llama_token id; // token id
        float logit;    // log-odds of the token
        float p;        // probability of the token
    } llama_token_data;

    typedef struct llama_token_data_array {
        llama_token_data * data;
        size_t size;
        bool sorted;
    } llama_token_data_array;

    typedef bool (*llama_progress_callback)(float progress, void * ctx);

    // Input data for llama_decode
    // A llama_batch object can contain input about one or many sequences
    // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
    //
    // - token  : the token ids of the input (used when embd is NULL)
    // - embd   : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
    // - pos    : the positions of the respective token in the sequence
    // - seq_id : the sequence to which the respective token belongs
    // - logits : if zero, the logits for the respective token will not be output
    //
    typedef struct llama_batch {
        int32_t n_tokens;

        llama_token  *  token;
        float        *  embd;
        llama_pos    *  pos;
        int32_t      *  n_seq_id;
        llama_seq_id ** seq_id;
        int8_t       *  logits;

        // NOTE: helpers for smooth API transition - can be deprecated in the future
        //       for future-proof code, use the above fields instead and ignore everything below
        //
        // pos[i] = all_pos_0 + i*all_pos_1
        //
        llama_pos    all_pos_0;  // used if pos == NULL
        llama_pos    all_pos_1;  // used if pos == NULL
        llama_seq_id all_seq_id; // used if seq_id == NULL
    } llama_batch;

    enum llama_model_kv_override_type {
        LLAMA_KV_OVERRIDE_INT,
        LLAMA_KV_OVERRIDE_FLOAT,
        LLAMA_KV_OVERRIDE_BOOL,
    };

    struct llama_model_kv_override {
        char key[128];
        enum llama_model_kv_override_type tag;
        union {
            int64_t int_value;
            double  float_value;
            bool    bool_value;
        };
    };
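    // Example: overriding a single integer metadata key when loading a model (illustrative
    // sketch; the key name is only an example). As in the llama.cpp example programs, the
    // overrides array is terminated by an entry whose key is an empty string:
    //
    //     struct llama_model_kv_override overrides[2];
    //     memset(overrides, 0, sizeof(overrides));
    //
    //     strncpy(overrides[0].key, "some.example.key", sizeof(overrides[0].key) - 1);
    //     overrides[0].tag       = LLAMA_KV_OVERRIDE_INT;
    //     overrides[0].int_value = 4;
    //
    //     overrides[1].key[0] = '\0'; // terminator
    //
    //     struct llama_model_params mparams = llama_model_default_params();
    //     mparams.kv_overrides = overrides;
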
    struct llama_model_params {
        int32_t n_gpu_layers; // number of layers to store in VRAM
        int32_t main_gpu;     // the GPU that is used for scratch and small tensors

        const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)

        // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
        // If the provided progress_callback returns true, model loading continues.
        // If it returns false, model loading is immediately aborted.
        llama_progress_callback progress_callback;

        // context pointer passed to the progress callback
        void * progress_callback_user_data;

        // override key-value pairs of the model meta data
        const struct llama_model_kv_override * kv_overrides;

        // Keep the booleans together to avoid misalignment during copy-by-value.
        bool vocab_only; // only load the vocabulary, no weights
        bool use_mmap;   // use mmap if possible
        bool use_mlock;  // force system to keep model in RAM
    };

    struct llama_context_params {
        uint32_t seed;              // RNG seed, -1 for random
        uint32_t n_ctx;             // text context, 0 = from model
        uint32_t n_batch;           // prompt processing maximum batch size
        uint32_t n_threads;         // number of threads to use for generation
        uint32_t n_threads_batch;   // number of threads to use for batch processing
        int8_t   rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`

        // ref: https://github.com/ggerganov/llama.cpp/pull/2054
        float    rope_freq_base;   // RoPE base frequency, 0 = from model
        float    rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
        float    yarn_ext_factor;  // YaRN extrapolation mix factor, negative = from model
        float    yarn_attn_factor; // YaRN magnitude scaling factor
        float    yarn_beta_fast;   // YaRN low correction dim
        float    yarn_beta_slow;   // YaRN high correction dim
        uint32_t yarn_orig_ctx;    // YaRN original context size

        enum ggml_type type_k; // data type for K cache
        enum ggml_type type_v; // data type for V cache

        // Keep the booleans together to avoid misalignment during copy-by-value.
        bool mul_mat_q;   // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
        bool logits_all;  // the llama_eval() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
        bool embedding;   // embedding mode only
        bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
    };

    // model quantization parameters
    typedef struct llama_model_quantize_params {
        int32_t nthread;             // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
        enum llama_ftype ftype;      // quantize to this llama_ftype
        bool allow_requantize;       // allow quantizing non-f32/f16 tensors
        bool quantize_output_tensor; // quantize output.weight
        bool only_copy;              // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
        bool pure;                   // disable k-quant mixtures and quantize all tensors to the same type
    } llama_model_quantize_params;
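    // Example: quantizing a model file to Q4_K_M with the default number of threads
    // (illustrative sketch; the file names are placeholders):
    //
    //     llama_model_quantize_params qparams = llama_model_quantize_default_params();
    //     qparams.ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;
    //
    //     if (llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams) != 0) {
    //         fprintf(stderr, "quantization failed\n");
    //     }
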
    // grammar types
    struct llama_grammar;

    // grammar element type
    enum llama_gretype {
        // end of rule definition
        LLAMA_GRETYPE_END            = 0,

        // start of alternate definition for rule
        LLAMA_GRETYPE_ALT            = 1,

        // non-terminal element: reference to rule
        LLAMA_GRETYPE_RULE_REF       = 2,

        // terminal element: character (code point)
        LLAMA_GRETYPE_CHAR           = 3,

        // inverse char(s) ([^a], [^a-b] [^abc])
        LLAMA_GRETYPE_CHAR_NOT       = 4,

        // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
        // be an inclusive range ([a-z])
        LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,

        // modifies a preceding LLAMA_GRETYPE_CHAR or
        // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
        LLAMA_GRETYPE_CHAR_ALT       = 6,
    };

    typedef struct llama_grammar_element {
        enum llama_gretype type;
        uint32_t           value; // Unicode code point or rule ID
    } llama_grammar_element;

    // performance timing information
    struct llama_timings {
        double t_start_ms;
        double t_end_ms;
        double t_load_ms;
        double t_sample_ms;
        double t_p_eval_ms;
        double t_eval_ms;

        int32_t n_sample;
        int32_t n_p_eval;
        int32_t n_eval;
    };

    // Helpers for getting default parameters
    LLAMA_API struct llama_model_params llama_model_default_params(void);
    LLAMA_API struct llama_context_params llama_context_default_params(void);
    LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
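    // The typical pattern is to start from the defaults and override only what is needed
    // (illustrative sketch; the values shown are arbitrary):
    //
    //     struct llama_model_params mparams = llama_model_default_params();
    //     mparams.n_gpu_layers = 32;      // offload 32 layers if built with GPU support
    //
    //     struct llama_context_params cparams = llama_context_default_params();
    //     cparams.n_ctx           = 4096; // context size (0 = use the model's value)
    //     cparams.n_batch         = 512;  // max tokens per llama_decode() call
    //     cparams.n_threads       = 8;
    //     cparams.n_threads_batch = 8;
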
    // Initialize the llama + ggml backend
    // If numa is true, use NUMA optimizations
    // Call once at the start of the program
    LLAMA_API void llama_backend_init(bool numa);

    // Call once at the end of the program - currently only used for MPI
    LLAMA_API void llama_backend_free(void);

    LLAMA_API struct llama_model * llama_load_model_from_file(
            const char * path_model,
            struct llama_model_params params);

    LLAMA_API void llama_free_model(struct llama_model * model);

    LLAMA_API struct llama_context * llama_new_context_with_model(
            struct llama_model * model,
            struct llama_context_params params);

    // Frees all allocated memory
    LLAMA_API void llama_free(struct llama_context * ctx);

    LLAMA_API int64_t llama_time_us(void);

    LLAMA_API int32_t llama_max_devices(void);
    LLAMA_API bool llama_mmap_supported (void);
    LLAMA_API bool llama_mlock_supported(void);

    LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);

    LLAMA_API uint32_t llama_n_ctx  (const struct llama_context * ctx);
    LLAMA_API uint32_t llama_n_batch(const struct llama_context * ctx);

    LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_model * model);

    LLAMA_API int32_t llama_n_vocab    (const struct llama_model * model);
    LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
    LLAMA_API int32_t llama_n_embd     (const struct llama_model * model);

    // Get the model's RoPE frequency scaling factor
    LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);

    // Functions to access the model's GGUF metadata scalar values
    // - The functions return the length of the string on success, or -1 on failure
    // - The output string is always null-terminated and cleared on failure
    // - GGUF array values are not supported by these functions

    // Get metadata value as a string by key name
    LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);

    // Get the number of metadata key/value pairs
    LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);

    // Get metadata key name by index
    LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);

    // Get metadata value as a string by index
    LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
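    // Example: dumping all metadata key/value pairs of a loaded model (illustrative sketch;
    // `model` is a loaded llama_model pointer and fixed-size buffers are used for brevity):
    //
    //     char key[256];
    //     char val[256];
    //     const int32_t n_kv = llama_model_meta_count(model);
    //     for (int32_t i = 0; i < n_kv; i++) {
    //         llama_model_meta_key_by_index    (model, i, key, sizeof(key));
    //         llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
    //         printf("%s = %s\n", key, val);
    //     }
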
    // Get a string describing the model type
    LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);

    // Returns the total size of all the tensors in the model in bytes
    LLAMA_API uint64_t llama_model_size(const struct llama_model * model);

    // Returns the total number of parameters in the model
    LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);

    // Get a llama model tensor
    LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);

    // Returns 0 on success
    LLAMA_API uint32_t llama_model_quantize(
            const char * fname_inp,
            const char * fname_out,
            const llama_model_quantize_params * params);

    // Apply a LoRA adapter to a loaded model
    // path_base_model is the path to a higher quality model to use as a base for
    // the layers modified by the adapter. Can be NULL to use the current loaded model.
    // The model needs to be reloaded before applying a new adapter, otherwise the adapter
    // will be applied on top of the previous one
    // Returns 0 on success
    LLAMA_API DEPRECATED(int32_t llama_apply_lora_from_file(
            struct llama_context * ctx,
            const char * path_lora,
            float scale,
            const char * path_base_model,
            int32_t n_threads),
            "use llama_model_apply_lora_from_file instead");

    LLAMA_API int32_t llama_model_apply_lora_from_file(
            const struct llama_model * model,
            const char * path_lora,
            float scale,
            const char * path_base_model,
            int32_t n_threads);

    //
    // KV cache
    //

    // Information associated with an individual cell in the KV cache view.
    struct llama_kv_cache_view_cell {
        // The position for this cell. Takes KV cache shifts into account.
        // May be negative if the cell is not populated.
        llama_pos pos;
    };

    // An updateable view of the KV cache.
    struct llama_kv_cache_view {
        // Number of KV cache cells. This will be the same as the context size.
        int32_t n_cells;

        // Maximum number of sequences that can exist in a cell. It's not an error
        // if there are more sequences in a cell than this value, however they will
        // not be visible in the view cells_sequences.
        int32_t n_max_seq;

        // Number of tokens in the cache. For example, if there are two populated
        // cells, the first with 1 sequence id in it and the second with 2 sequence
        // ids then you'll have 3 tokens.
        int32_t token_count;

        // Number of populated cache cells.
        int32_t used_cells;

        // Maximum contiguous empty slots in the cache.
        int32_t max_contiguous;

        // Index to the start of the max_contiguous slot range. Can be negative
        // when cache is full.
        int32_t max_contiguous_idx;

        // Information for an individual cell.
        struct llama_kv_cache_view_cell * cells;

        // The sequences for each cell. There will be n_max_seq items per cell.
        llama_seq_id * cells_sequences;
    };

    // Create an empty KV cache view. (use only for debugging purposes)
    LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_max_seq);

    // Free a KV cache view. (use only for debugging purposes)
    LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);

    // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
    LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);

    // Returns the number of tokens in the KV cache (slow, use only for debug)
    // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
    LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);

    // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
    LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);

    // Clear the KV cache
    LLAMA_API void llama_kv_cache_clear(
            struct llama_context * ctx);

    // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
    // seq_id < 0 : match any sequence
    // p0 < 0     : [0,  p1]
    // p1 < 0     : [p0, inf)
    LLAMA_API void llama_kv_cache_seq_rm(
            struct llama_context * ctx,
            llama_seq_id seq_id,
            llama_pos p0,
            llama_pos p1);

    // Copy all tokens that belong to the specified sequence to another sequence
    // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
    // p0 < 0 : [0,  p1]
    // p1 < 0 : [p0, inf)
    LLAMA_API void llama_kv_cache_seq_cp(
            struct llama_context * ctx,
            llama_seq_id seq_id_src,
            llama_seq_id seq_id_dst,
            llama_pos p0,
            llama_pos p1);

    // Removes all tokens that do not belong to the specified sequence
    LLAMA_API void llama_kv_cache_seq_keep(
            struct llama_context * ctx,
            llama_seq_id seq_id);

    // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
    // If the KV cache is RoPEd, the KV data is updated accordingly
    // p0 < 0 : [0,  p1]
    // p1 < 0 : [p0, inf)
    LLAMA_API void llama_kv_cache_seq_shift(
            struct llama_context * ctx,
            llama_seq_id seq_id,
            llama_pos p0,
            llama_pos p1,
            llama_pos delta);
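    // Example: a simple "context shift" for a single sequence when the cache fills up -
    // discard the oldest tokens after the prompt and slide the remaining ones back so that
    // generation can continue (illustrative sketch; `n_past` and `n_keep` are counters
    // assumed to be maintained by the caller):
    //
    //     const llama_pos n_discard = (n_past - n_keep) / 2;
    //
    //     llama_kv_cache_seq_rm   (ctx, 0, n_keep, n_keep + n_discard);
    //     llama_kv_cache_seq_shift(ctx, 0, n_keep + n_discard, n_past, -n_discard);
    //
    //     n_past -= n_discard;
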
    //
    // State / sessions
    //

    // Returns the maximum size in bytes of the state (rng, logits, embedding
    // and kv_cache) - will often be smaller after compacting tokens
    LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);

    // Copies the state to the specified destination address.
    // Destination needs to have allocated enough memory.
    // Returns the number of bytes copied
    LLAMA_API size_t llama_copy_state_data(
            struct llama_context * ctx,
            uint8_t * dst);

    // Set the state reading from the specified address
    // Returns the number of bytes read
    LLAMA_API size_t llama_set_state_data(
            struct llama_context * ctx,
            uint8_t * src);

    // Save/load session file
    LLAMA_API bool llama_load_session_file(
            struct llama_context * ctx,
            const char * path_session,
            llama_token * tokens_out,
            size_t n_token_capacity,
            size_t * n_token_count_out);

    LLAMA_API bool llama_save_session_file(
            struct llama_context * ctx,
            const char * path_session,
            const llama_token * tokens,
            size_t n_token_count);
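    // Example: restoring a previously saved session and saving it again afterwards
    // (illustrative sketch; "prompt.session" is a placeholder path):
    //
    //     size_t n_restored = 0;
    //     llama_token session_tokens[1024];
    //
    //     if (llama_load_session_file(ctx, "prompt.session", session_tokens, 1024, &n_restored)) {
    //         // the context state was restored; session_tokens[0..n_restored) are the tokens
    //         // it corresponds to, so they do not need to be re-evaluated
    //     }
    //
    //     // ... decode more tokens, appending them to session_tokens / n_restored ...
    //
    //     llama_save_session_file(ctx, "prompt.session", session_tokens, n_restored);
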
    //
    // Decoding
    //

    // Run the llama inference to obtain the logits and probabilities for the next token(s).
    // tokens + n_tokens is the provided batch of new tokens to process
    // n_past is the number of tokens to use from previous eval calls
    // Returns 0 on success
    // DEPRECATED: use llama_decode() instead
    LLAMA_API DEPRECATED(int llama_eval(
            struct llama_context * ctx,
            llama_token * tokens,
            int32_t n_tokens,
            int32_t n_past),
            "use llama_decode() instead");

    // Same as llama_eval, but use float matrix input directly.
    // DEPRECATED: use llama_decode() instead
    LLAMA_API DEPRECATED(int llama_eval_embd(
            struct llama_context * ctx,
            float * embd,
            int32_t n_tokens,
            int32_t n_past),
            "use llama_decode() instead");

    // Return batch for single sequence of tokens starting at pos_0
    //
    // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
    //
    LLAMA_API struct llama_batch llama_batch_get_one(
            llama_token * tokens,
            int32_t n_tokens,
            llama_pos pos_0,
            llama_seq_id seq_id);

    // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
    // Each token can be assigned up to n_seq_max sequence ids
    // The batch has to be freed with llama_batch_free()
    // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
    // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
    // The rest of the llama_batch members are allocated with size n_tokens
    // All members are left uninitialized
    LLAMA_API struct llama_batch llama_batch_init(
            int32_t n_tokens,
            int32_t embd,
            int32_t n_seq_max);

    // Frees a batch of tokens allocated with llama_batch_init()
    LLAMA_API void llama_batch_free(struct llama_batch batch);
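    // Example: evaluating a prompt with an explicitly constructed batch, requesting logits
    // only for the last token (illustrative sketch; `prompt_tokens`/`n_prompt` come from
    // llama_tokenize() and n_prompt is assumed to be <= 512):
    //
    //     struct llama_batch batch = llama_batch_init(512, 0, 1);
    //
    //     batch.n_tokens = n_prompt;
    //     for (int32_t i = 0; i < n_prompt; i++) {
    //         batch.token   [i]    = prompt_tokens[i];
    //         batch.pos     [i]    = i;
    //         batch.n_seq_id[i]    = 1;
    //         batch.seq_id  [i][0] = 0;
    //         batch.logits  [i]    = (i == n_prompt - 1);
    //     }
    //
    //     if (llama_decode(ctx, batch) != 0) {
    //         fprintf(stderr, "llama_decode() failed\n");
    //     }
    //
    //     llama_batch_free(batch);
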
    // Positive return values do not mean a fatal error, but rather a warning.
    //   0 - success
    //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
    // < 0 - error
    LLAMA_API int32_t llama_decode(
            struct llama_context * ctx,
            struct llama_batch batch);

    // Set the number of threads used for decoding
    // n_threads is the number of threads used for generation (single token)
    // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
    LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);

    // Token logits obtained from the last call to llama_eval()
    // The logits for the last token are stored in the last row
    // Logits for which llama_batch.logits[i] == 0 are undefined
    // Rows: n_tokens provided with llama_batch
    // Cols: n_vocab
    LLAMA_API float * llama_get_logits(struct llama_context * ctx);

    // Logits for the ith token. Equivalent to:
    // llama_get_logits(ctx) + i*n_vocab
    LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);

    // Get the embeddings for the input
    // shape: [n_embd] (1-dimensional)
    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);

    //
    // Vocab
    //

    LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);

    LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);

    LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token);

    // Special tokens
    LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
    LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
    LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line

    // Returns -1 if unknown, 1 for true or 0 for false.
    LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);

    // Returns -1 if unknown, 1 for true or 0 for false.
    LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);

    // codellama infill tokens
    LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
    LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
    LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
    LLAMA_API llama_token llama_token_eot   (const struct llama_model * model); // End of infill middle

    //
    // Tokenization
    //

    /// @details Convert the provided text into tokens.
    /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
    /// @return Returns the number of tokens on success, no more than n_max_tokens
    /// @return Returns a negative number on failure - the number of tokens that would have been returned
    /// @param special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext.
    ///                Does not insert a leading space.
    LLAMA_API int32_t llama_tokenize(
            const struct llama_model * model,
            const char * text,
            int32_t text_len,
            llama_token * tokens,
            int32_t n_max_tokens,
            bool add_bos,
            bool special);
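    // Example: tokenizing a C string and growing the buffer if the first attempt is too
    // small, using the negative return value documented above (illustrative sketch):
    //
    //     const char * text  = "Hello world";
    //     int32_t      n_cap = 32;
    //     llama_token * toks = (llama_token *) malloc(n_cap * sizeof(llama_token));
    //
    //     int32_t n = llama_tokenize(model, text, (int32_t) strlen(text), toks, n_cap,
    //                                /*add_bos*/ true, /*special*/ false);
    //     if (n < 0) {
    //         // -n is the number of tokens that would have been produced
    //         n_cap = -n;
    //         toks  = (llama_token *) realloc(toks, n_cap * sizeof(llama_token));
    //         n     = llama_tokenize(model, text, (int32_t) strlen(text), toks, n_cap, true, false);
    //     }
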
    // Token Id -> Piece.
    // Uses the vocabulary in the provided context.
    // Does not write a null terminator to the buffer.
    // User code is responsible for removing the leading whitespace of the first non-BOS token when decoding multiple tokens.
    LLAMA_API int32_t llama_token_to_piece(
            const struct llama_model * model,
            llama_token token,
            char * buf,
            int32_t length);
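    // Example: writing the piece for a sampled token to stdout (illustrative sketch; a
    // fixed buffer is used, which is large enough for typical vocabulary entries):
    //
    //     char piece[32];
    //     const int32_t n = llama_token_to_piece(model, token, piece, sizeof(piece));
    //     if (n > 0) {
    //         fwrite(piece, 1, (size_t) n, stdout); // piece is not null-terminated
    //     }
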
    //
    // Grammar
    //

    LLAMA_API struct llama_grammar * llama_grammar_init(
            const llama_grammar_element ** rules,
            size_t n_rules,
            size_t start_rule_index);

    LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);

    LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);

    //
    // Sampling functions
    //

    // Sets the current rng seed.
    LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);

    /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
    /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
    LLAMA_API void llama_sample_repetition_penalties(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            const llama_token * last_tokens,
            size_t penalty_last_n,
            float penalty_repeat,
            float penalty_freq,
            float penalty_present);

    /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted.
    /// @param guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
    /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
    LLAMA_API void llama_sample_classifier_free_guidance(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            struct llama_context * guidance_ctx,
            float scale);
    /// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
    LLAMA_API void llama_sample_softmax(
            struct llama_context * ctx,
            llama_token_data_array * candidates);

    /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_k(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            int32_t k,
            size_t min_keep);

    /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_p(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float p,
            size_t min_keep);

    /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
    LLAMA_API void llama_sample_min_p(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float p,
            size_t min_keep);

    /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
    LLAMA_API void llama_sample_tail_free(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float z,
            size_t min_keep);

    /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
    LLAMA_API void llama_sample_typical(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float p,
            size_t min_keep);

    LLAMA_API void llama_sample_temp(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float temp);

    LLAMA_API DEPRECATED(void llama_sample_temperature(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float temp),
            "use llama_sample_temp instead");

    /// @details Apply constraints from grammar
    LLAMA_API void llama_sample_grammar(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            const struct llama_grammar * grammar);

    /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float tau,
            float eta,
            int32_t m,
            float * mu);

    /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat_v2(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float tau,
            float eta,
            float * mu);

    /// @details Selects the token with the highest probability.
    ///          Does not compute the token probabilities. Use llama_sample_softmax() instead.
    LLAMA_API llama_token llama_sample_token_greedy(
            struct llama_context * ctx,
            llama_token_data_array * candidates);

    /// @details Randomly selects a token from the candidates based on their probabilities.
    LLAMA_API llama_token llama_sample_token(
            struct llama_context * ctx,
            llama_token_data_array * candidates);
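    // Example: a basic sampling chain - build a candidate list from the logits, apply
    // top-k / top-p / temperature and then draw a token (illustrative sketch; `i` is the
    // batch index for which logits were requested, and the 40 / 0.95 / 0.80 values are
    // arbitrary):
    //
    //     const int32_t n_vocab = llama_n_vocab(llama_get_model(ctx));
    //     const float * logits  = llama_get_logits_ith(ctx, i);
    //
    //     llama_token_data * candidates =
    //         (llama_token_data *) malloc(n_vocab * sizeof(llama_token_data));
    //     for (llama_token id = 0; id < n_vocab; id++) {
    //         candidates[id] = (llama_token_data) { id, logits[id], 0.0f };
    //     }
    //     llama_token_data_array cur = { candidates, (size_t) n_vocab, false };
    //
    //     llama_sample_top_k(ctx, &cur, 40,    1);
    //     llama_sample_top_p(ctx, &cur, 0.95f, 1);
    //     llama_sample_temp (ctx, &cur, 0.80f);
    //
    //     const llama_token id = llama_sample_token(ctx, &cur);
    //     free(candidates);
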
    /// @details Accepts the sampled token into the grammar
    LLAMA_API void llama_grammar_accept_token(
            struct llama_context * ctx,
            struct llama_grammar * grammar,
            llama_token token);

    //
    // Beam search
    //

    struct llama_beam_view {
        const llama_token * tokens;

        size_t n_tokens;
        float  p;        // Cumulative beam probability (renormalized relative to all beams)
        bool   eob;      // Callback should set this to true when a beam is at end-of-beam.
    };

    // Passed to beam_search_callback function.
    // Whenever 0 < common_prefix_length, this number of tokens should be copied from any of the beams
    // (e.g. beams[0]) as they will be removed (shifted) from all beams in all subsequent callbacks.
    // These pointers are valid only during the synchronous callback, so should not be saved.
    struct llama_beams_state {
        struct llama_beam_view * beam_views;

        size_t n_beams;              // Number of elements in beam_views[].
        size_t common_prefix_length; // Current max length of prefix tokens shared by all beams.
        bool   last_call;            // True iff this is the last callback invocation.
    };

    // Type of pointer to the beam_search_callback function.
    // void* callback_data is any custom data passed to llama_beam_search, that is subsequently
    // passed back to beam_search_callback. This avoids having to use global variables in the callback.
    typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, struct llama_beams_state);

    /// @details Deterministically returns entire sentence constructed by a beam search.
    /// @param ctx Pointer to the llama_context.
    /// @param callback Invoked for each iteration of the beam_search loop, passing in beams_state.
    /// @param callback_data A pointer that is simply passed back to callback.
    /// @param n_beams Number of beams to use.
    /// @param n_past Number of tokens already evaluated.
    /// @param n_predict Maximum number of tokens to predict. EOS may occur earlier.
    LLAMA_API void llama_beam_search(
            struct llama_context * ctx,
            llama_beam_search_callback_fn_t callback,
            void * callback_data,
            size_t n_beams,
            int32_t n_past,
            int32_t n_predict);
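    // Example: a minimal beam-search callback that prints summary info for each beam on the
    // final invocation (illustrative sketch; `callback_data` is unused here and the call at
    // the bottom uses arbitrary beam/predict counts):
    //
    //     static void my_beam_callback(void * callback_data, struct llama_beams_state state) {
    //         (void) callback_data;
    //         if (!state.last_call) {
    //             return;
    //         }
    //         for (size_t i = 0; i < state.n_beams; i++) {
    //             const struct llama_beam_view * bv = &state.beam_views[i];
    //             printf("beam %zu: p=%f, n_tokens=%zu\n", i, bv->p, bv->n_tokens);
    //         }
    //     }
    //
    //     // llama_beam_search(ctx, my_beam_callback, NULL, 4, n_past, 64);
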
    // Performance information
    LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);

    LLAMA_API void llama_print_timings(struct llama_context * ctx);
    LLAMA_API void llama_reset_timings(struct llama_context * ctx);

    // Print system information
    LLAMA_API const char * llama_print_system_info(void);

    // Set callback for all future logging events.
    // If this is not called, or NULL is supplied, everything is output on stderr.
    LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
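    // Example: redirecting all log output to a file (illustrative sketch; `g_logfile` is
    // assumed to be an already-open FILE*, and the ggml_log_callback signature comes from ggml.h):
    //
    //     static void my_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
    //         (void) level;
    //         FILE * f = (FILE *) user_data;
    //         fputs(text, f);
    //         fflush(f);
    //     }
    //
    //     // llama_log_set(my_log_callback, g_logfile);
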
    LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef LLAMA_API_INTERNAL

#include <vector>
#include <string>

struct ggml_tensor;

const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
);

#endif // LLAMA_API_INTERNAL

#endif // LLAMA_H