#ifndef LLAMA_H
#define LLAMA_H

#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES
#else
#define LLAMA_MAX_DEVICES 1
#endif // GGML_USE_CUBLAS
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define LLAMA_API __declspec(dllexport)
#        else
#            define LLAMA_API __declspec(dllimport)
#        endif
#    else
#        define LLAMA_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define LLAMA_API
#endif

#ifdef __GNUC__
#    define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define DEPRECATED(func, hint) func
#endif

#define LLAMA_DEFAULT_SEED 0xFFFFFFFF

#define LLAMA_MAX_RNG_STATE (64*1024)

#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'

#define LLAMA_SESSION_MAGIC   LLAMA_FILE_MAGIC_GGSN
#define LLAMA_SESSION_VERSION 2

#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
#define LLAMA_SUPPORTS_GPU_OFFLOAD
#endif

#ifdef __cplusplus
extern "C" {
#endif

    //
    // C interface
    //
    // TODO: show sample usage
    //
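
    // A rough usage sketch (illustrative only, error handling omitted): load a model,
    // tokenize a short prompt, decode it and greedily generate a few tokens.
    // "model.gguf", the prompt and the buffer sizes are placeholders - adapt them to
    // your setup; the examples/ directory of the repository contains complete programs.
    //
    //     #include "llama.h"
    //     #include <stdio.h>
    //     #include <string.h>
    //
    //     int main(void) {
    //         llama_backend_init(false);
    //
    //         struct llama_model   * model = llama_load_model_from_file("model.gguf", llama_model_default_params());
    //         struct llama_context * ctx   = llama_new_context_with_model(model, llama_context_default_params());
    //
    //         // tokenize the prompt (64 is simply a buffer large enough for this prompt)
    //         const char * prompt = "Hello";
    //         llama_token tokens[64];
    //         int n_tokens = llama_tokenize(model, prompt, (int) strlen(prompt), tokens, 64, true, false);
    //
    //         // evaluate the prompt, then generate token by token
    //         llama_decode(ctx, llama_batch_get_one(tokens, n_tokens, 0, 0));
    //         int n_pos = n_tokens;
    //         for (int i = 0; i < 16; ++i) {
    //             // with llama_batch_get_one() only the last token's logits are produced
    //             const float * logits = llama_get_logits(ctx);
    //             llama_token best = 0;
    //             for (llama_token t = 1; t < llama_n_vocab(model); ++t) {
    //                 if (logits[t] > logits[best]) best = t;
    //             }
    //             char piece[16];
    //             int n = llama_token_to_piece(model, best, piece, (int) sizeof(piece));
    //             if (n > 0) printf("%.*s", n, piece);
    //             if (best == llama_token_eos(model)) break;
    //             llama_decode(ctx, llama_batch_get_one(&best, 1, n_pos++, 0));
    //         }
    //
    //         llama_print_timings(ctx);
    //         llama_free(ctx);
    //         llama_free_model(model);
    //         llama_backend_free();
    //         return 0;
    //     }
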
    struct llama_model;
    struct llama_context;

    typedef int32_t llama_pos;
    typedef int32_t llama_token;
    typedef int32_t llama_seq_id;

    enum llama_vocab_type {
        LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
        LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding
    };

    enum llama_token_type {
        LLAMA_TOKEN_TYPE_UNDEFINED    = 0,
        LLAMA_TOKEN_TYPE_NORMAL       = 1,
        LLAMA_TOKEN_TYPE_UNKNOWN      = 2,
        LLAMA_TOKEN_TYPE_CONTROL      = 3,
        LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
        LLAMA_TOKEN_TYPE_UNUSED       = 5,
        LLAMA_TOKEN_TYPE_BYTE         = 6,
    };

    // model file types
    enum llama_ftype {
        LLAMA_FTYPE_ALL_F32              = 0,
        LLAMA_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
        // LLAMA_FTYPE_MOSTLY_Q4_2       = 5,  // support has been removed
        // LLAMA_FTYPE_MOSTLY_Q4_3       = 6,  // support has been removed
        LLAMA_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_S        = 11, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_M        = 12, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_L        = 13, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_K_S        = 14, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_K_M        = 15, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_K_S        = 16, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_K_M        = 17, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q6_K          = 18, // except 1d tensors

        LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
    };

    enum llama_rope_scaling_type {
        LLAMA_ROPE_SCALING_UNSPECIFIED = -1,
        LLAMA_ROPE_SCALING_NONE        = 0,
        LLAMA_ROPE_SCALING_LINEAR      = 1,
        LLAMA_ROPE_SCALING_YARN        = 2,
        LLAMA_ROPE_SCALING_MAX_VALUE   = LLAMA_ROPE_SCALING_YARN,
    };

    typedef struct llama_token_data {
        llama_token id; // token id
        float logit;    // log-odds of the token
        float p;        // probability of the token
    } llama_token_data;

    typedef struct llama_token_data_array {
        llama_token_data * data;
        size_t size;
        bool sorted;
    } llama_token_data_array;

    typedef void (*llama_progress_callback)(float progress, void *ctx);

    // Input data for llama_decode
    // A llama_batch object can contain input about one or many sequences
    // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
    //
    // - token  : the token ids of the input (used when embd is NULL)
    // - embd   : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
    // - pos    : the positions of the respective token in the sequence
    // - seq_id : the sequence to which the respective token belongs
    // - logits : if zero, the logits for the respective token will not be output
    //
    typedef struct llama_batch {
        int32_t n_tokens;

        llama_token  *  token;
        float        *  embd;
        llama_pos    *  pos;
        int32_t      *  n_seq_id;
        llama_seq_id ** seq_id;
        int8_t       *  logits;

        // NOTE: helpers for smooth API transition - can be deprecated in the future
        //       for future-proof code, use the above fields instead and ignore everything below
        //
        // pos[i] = all_pos_0 + i*all_pos_1
        //
        llama_pos    all_pos_0;  // used if pos == NULL
        llama_pos    all_pos_1;  // used if pos == NULL
        llama_seq_id all_seq_id; // used if seq_id == NULL
    } llama_batch;
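
    // For illustration (a sketch, not part of the API): a batch whose pos and seq_id
    // pointers are NULL and whose helper fields are all_pos_0 = 8, all_pos_1 = 1,
    // all_seq_id = 0 is treated as if it had been filled explicitly like this:
    //
    //     for (int32_t i = 0; i < batch.n_tokens; ++i) {
    //         pos[i]    = 8 + i*1; // all_pos_0 + i*all_pos_1
    //         seq_id[i] = 0;       // all_seq_id
    //     }
    //
    // This is what the llama_batch_get_one() helper declared further below relies on.
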
    struct llama_model_params {
        int32_t n_gpu_layers; // number of layers to store in VRAM
        int32_t main_gpu;     // the GPU that is used for scratch and small tensors

        const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)

        // called with a progress value between 0 and 1, pass NULL to disable
        llama_progress_callback progress_callback;

        // context pointer passed to the progress callback
        void * progress_callback_user_data;

        // Keep the booleans together to avoid misalignment during copy-by-value.
        bool vocab_only; // only load the vocabulary, no weights
        bool use_mmap;   // use mmap if possible
        bool use_mlock;  // force system to keep model in RAM
    };

    struct llama_context_params {
        uint32_t seed;              // RNG seed, -1 for random
        uint32_t n_ctx;             // text context, 0 = from model
        uint32_t n_batch;           // prompt processing maximum batch size
        uint32_t n_threads;         // number of threads to use for generation
        uint32_t n_threads_batch;   // number of threads to use for batch processing
        int8_t   rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`

        // ref: https://github.com/ggerganov/llama.cpp/pull/2054
        float    rope_freq_base;   // RoPE base frequency, 0 = from model
        float    rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
        float    yarn_ext_factor;  // YaRN extrapolation mix factor, negative = from model
        float    yarn_attn_factor; // YaRN magnitude scaling factor
        float    yarn_beta_fast;   // YaRN low correction dim
        float    yarn_beta_slow;   // YaRN high correction dim
        uint32_t yarn_orig_ctx;    // YaRN original context size

        // Keep the booleans together to avoid misalignment during copy-by-value.
        bool mul_mat_q;  // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
        bool f16_kv;     // use fp16 for KV cache, fp32 otherwise
        bool logits_all; // the llama_eval() call computes all logits, not just the last one
        bool embedding;  // embedding mode only
    };
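
    // A small sketch of how these parameter structs are typically used: start from the
    // defaults returned by the helper functions declared further below and override only
    // the fields you need (the concrete values here are arbitrary examples):
    //
    //     struct llama_context_params cparams = llama_context_default_params();
    //     cparams.seed      = 42;   // fixed seed for reproducible sampling
    //     cparams.n_ctx     = 2048; // context size in tokens (0 = use the model's training context)
    //     cparams.n_threads = 8;    // threads used for single-token generation
    //
    //     struct llama_context * ctx = llama_new_context_with_model(model, cparams);
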
    // model quantization parameters
    typedef struct llama_model_quantize_params {
        int nthread;                 // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
        enum llama_ftype ftype;      // quantize to this llama_ftype
        bool allow_requantize;       // allow quantizing non-f32/f16 tensors
        bool quantize_output_tensor; // quantize output.weight
        bool only_copy;              // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
        bool pure;                   // disable k-quant mixtures and quantize all tensors to the same type
    } llama_model_quantize_params;

    // grammar types
    struct llama_grammar;

    // grammar element type
    enum llama_gretype {
        // end of rule definition
        LLAMA_GRETYPE_END            = 0,

        // start of alternate definition for rule
        LLAMA_GRETYPE_ALT            = 1,

        // non-terminal element: reference to rule
        LLAMA_GRETYPE_RULE_REF       = 2,

        // terminal element: character (code point)
        LLAMA_GRETYPE_CHAR           = 3,

        // inverse char(s) ([^a], [^a-b] [^abc])
        LLAMA_GRETYPE_CHAR_NOT       = 4,

        // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
        // be an inclusive range ([a-z])
        LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,

        // modifies a preceding LLAMA_GRETYPE_CHAR or
        // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
        LLAMA_GRETYPE_CHAR_ALT       = 6,
    };

    typedef struct llama_grammar_element {
        enum llama_gretype type;
        uint32_t           value; // Unicode code point or rule ID
    } llama_grammar_element;
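
    // A hand-built sketch of how a grammar can be encoded with these elements
    // (illustrative only - grammars are normally parsed from GBNF text by the helper
    // code in common/grammar-parser of the repository). The GBNF grammar
    //
    //     root  ::= digit root | digit     # one or more digits
    //     digit ::= [0-9]
    //
    // could be encoded as:
    //
    //     const llama_grammar_element rule_root[] = {
    //         { LLAMA_GRETYPE_RULE_REF, 1 }, { LLAMA_GRETYPE_RULE_REF, 0 }, // digit root
    //         { LLAMA_GRETYPE_ALT,      0 },                                // |
    //         { LLAMA_GRETYPE_RULE_REF, 1 },                                // digit
    //         { LLAMA_GRETYPE_END,      0 },
    //     };
    //     const llama_grammar_element rule_digit[] = {
    //         { LLAMA_GRETYPE_CHAR, '0' }, { LLAMA_GRETYPE_CHAR_RNG_UPPER, '9' }, // [0-9]
    //         { LLAMA_GRETYPE_END,  0 },
    //     };
    //     const llama_grammar_element * rules[] = { rule_root, rule_digit };
    //
    //     // see llama_grammar_init() further below: 2 rules, start at rule 0 ("root")
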
    // performance timing information
    struct llama_timings {
        double t_start_ms;
        double t_end_ms;
        double t_load_ms;
        double t_sample_ms;
        double t_p_eval_ms;
        double t_eval_ms;

        int32_t n_sample;
        int32_t n_p_eval;
        int32_t n_eval;
    };

    // Helpers for getting default parameters
    LLAMA_API struct llama_model_params          llama_model_default_params(void);
    LLAMA_API struct llama_context_params        llama_context_default_params(void);
    LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);

    // Initialize the llama + ggml backend
    // If numa is true, use NUMA optimizations
    // Call once at the start of the program
    LLAMA_API void llama_backend_init(bool numa);

    // Call once at the end of the program - currently only used for MPI
    LLAMA_API void llama_backend_free(void);

    LLAMA_API struct llama_model * llama_load_model_from_file(
            const char * path_model,
            struct llama_model_params params);

    LLAMA_API void llama_free_model(struct llama_model * model);

    LLAMA_API struct llama_context * llama_new_context_with_model(
            struct llama_model * model,
            struct llama_context_params params);

    // Frees all allocated memory
    LLAMA_API void llama_free(struct llama_context * ctx);

    LLAMA_API int64_t llama_time_us(void);

    LLAMA_API int  llama_max_devices    (void);
    LLAMA_API bool llama_mmap_supported (void);
    LLAMA_API bool llama_mlock_supported(void);

    LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);

    LLAMA_API int llama_n_ctx(const struct llama_context * ctx);

    LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_model * model);

    LLAMA_API int llama_n_vocab    (const struct llama_model * model);
    LLAMA_API int llama_n_ctx_train(const struct llama_model * model);
    LLAMA_API int llama_n_embd     (const struct llama_model * model);

    // Get the model's RoPE frequency scaling factor
    LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);

    // Functions to access the model's GGUF metadata scalar values
    // - The functions return the length of the string on success, or -1 on failure
    // - The output string is always null-terminated and cleared on failure
    // - GGUF array values are not supported by these functions

    // Get metadata value as a string by key name
    LLAMA_API int llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);

    // Get the number of metadata key/value pairs
    LLAMA_API int llama_model_meta_count(const struct llama_model * model);

    // Get metadata key name by index
    LLAMA_API int llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size);

    // Get metadata value as a string by index
    LLAMA_API int llama_model_meta_val_str_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size);
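
    // Sketch of dumping all metadata key/value pairs with the functions above
    // (the buffer sizes are arbitrary examples):
    //
    //     char key[128];
    //     char val[128];
    //     const int n_kv = llama_model_meta_count(model);
    //     for (int i = 0; i < n_kv; ++i) {
    //         if (llama_model_meta_key_by_index    (model, i, key, sizeof(key)) >= 0 &&
    //             llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) >= 0) {
    //             printf("%s = %s\n", key, val);
    //         }
    //     }
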
    // Get a string describing the model type
    LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);

    // Returns the total size of all the tensors in the model in bytes
    LLAMA_API uint64_t llama_model_size(const struct llama_model * model);

    // Returns the total number of parameters in the model
    LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);

    // Get a llama model tensor
    LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);

    // Returns 0 on success
    LLAMA_API int llama_model_quantize(
            const char * fname_inp,
            const char * fname_out,
            const llama_model_quantize_params * params);
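
    // Sketch: quantize an F16 GGUF file to Q4_K_M using the default parameters
    // (the file names are placeholders):
    //
    //     llama_model_quantize_params qparams = llama_model_quantize_default_params();
    //     qparams.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M;
    //     qparams.nthread = 4;
    //
    //     if (llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams) != 0) {
    //         fprintf(stderr, "quantization failed\n");
    //     }
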
    // Apply a LoRA adapter to a loaded model
    // path_base_model is the path to a higher quality model to use as a base for
    // the layers modified by the adapter. Can be NULL to use the current loaded model.
    // The model needs to be reloaded before applying a new adapter, otherwise the adapter
    // will be applied on top of the previous one
    // Returns 0 on success
    LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
            struct llama_context * ctx,
            const char * path_lora,
            float scale,
            const char * path_base_model,
            int n_threads),
            "use llama_model_apply_lora_from_file instead");

    LLAMA_API int llama_model_apply_lora_from_file(
            const struct llama_model * model,
            const char * path_lora,
            float scale,
            const char * path_base_model,
            int n_threads);

    //
    // KV cache
    //

    // Information associated with an individual cell in the KV cache view.
    struct llama_kv_cache_view_cell {
        // The position for this cell. Takes KV cache shifts into account.
        // May be negative if the cell is not populated.
        llama_pos pos;
    };

    // An updateable view of the KV cache.
    struct llama_kv_cache_view {
        // Number of KV cache cells. This will be the same as the context size.
        int32_t n_cells;

        // Maximum number of sequences that can exist in a cell. It's not an error
        // if there are more sequences in a cell than this value, however they will
        // not be visible in the view cells_sequences.
        int32_t n_max_seq;

        // Number of tokens in the cache. For example, if there are two populated
        // cells, the first with 1 sequence id in it and the second with 2 sequence
        // ids then you'll have 3 tokens.
        int32_t token_count;

        // Number of populated cache cells.
        int32_t used_cells;

        // Maximum contiguous empty slots in the cache.
        int32_t max_contiguous;

        // Index to the start of the max_contiguous slot range. Can be negative
        // when cache is full.
        int32_t max_contiguous_idx;

        // Information for an individual cell.
        struct llama_kv_cache_view_cell * cells;

        // The sequences for each cell. There will be n_max_seq items per cell.
        llama_seq_id * cells_sequences;
    };

    // Create an empty KV cache view. (use only for debugging purposes)
    LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_max_seq);

    // Free a KV cache view. (use only for debugging purposes)
    LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);

    // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
    LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
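
    // Debugging sketch: take a snapshot of the cache and print a few counters
    // (assumes an existing context `ctx`; 4 is an arbitrary n_max_seq):
    //
    //     struct llama_kv_cache_view view = llama_kv_cache_view_init(ctx, 4);
    //     llama_kv_cache_view_update(ctx, &view);
    //     printf("KV cache: %d/%d cells used, %d tokens\n", view.used_cells, view.n_cells, view.token_count);
    //     llama_kv_cache_view_free(&view);
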
    // Returns the number of tokens in the KV cache (slow, use only for debug)
    // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
    LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);

    // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
    LLAMA_API int llama_get_kv_cache_used_cells(const struct llama_context * ctx);

    // Clear the KV cache
    LLAMA_API void llama_kv_cache_clear(
            struct llama_context * ctx);

    // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
    // seq_id < 0 : match any sequence
    // p0 < 0     : [0,  p1]
    // p1 < 0     : [p0, inf)
    LLAMA_API void llama_kv_cache_seq_rm(
            struct llama_context * ctx,
            llama_seq_id seq_id,
            llama_pos p0,
            llama_pos p1);

    // Copy all tokens that belong to the specified sequence to another sequence
    // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
    // p0 < 0 : [0,  p1]
    // p1 < 0 : [p0, inf)
    LLAMA_API void llama_kv_cache_seq_cp(
            struct llama_context * ctx,
            llama_seq_id seq_id_src,
            llama_seq_id seq_id_dst,
            llama_pos p0,
            llama_pos p1);

    // Removes all tokens that do not belong to the specified sequence
    LLAMA_API void llama_kv_cache_seq_keep(
            struct llama_context * ctx,
            llama_seq_id seq_id);

    // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
    // If the KV cache is RoPEd, the KV data is updated accordingly
    // p0 < 0 : [0,  p1]
    // p1 < 0 : [p0, inf)
    LLAMA_API void llama_kv_cache_seq_shift(
            struct llama_context * ctx,
            llama_seq_id seq_id,
            llama_pos p0,
            llama_pos p1,
            llama_pos delta);
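
    // A sketch of "context shifting" built from the two calls above, similar to what the
    // main example in the repository does: when sequence 0 is about to overflow the context,
    // drop n_discard tokens after the first n_keep ones and slide the remainder back so
    // generation can continue (assumes `ctx`, `n_keep`, `n_discard` and `n_past` are set up):
    //
    //     llama_kv_cache_seq_rm   (ctx, 0, n_keep            , n_keep + n_discard);
    //     llama_kv_cache_seq_shift(ctx, 0, n_keep + n_discard, n_past, -n_discard);
    //     n_past -= n_discard;
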
    //
    // State / sessions
    //

    // Returns the maximum size in bytes of the state (rng, logits, embedding
    // and kv_cache) - will often be smaller after compacting tokens
    LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);

    // Copies the state to the specified destination address.
    // Destination needs to have allocated enough memory.
    // Returns the number of bytes copied
    LLAMA_API size_t llama_copy_state_data(
            struct llama_context * ctx,
            uint8_t * dst);

    // Set the state reading from the specified address
    // Returns the number of bytes read
    LLAMA_API size_t llama_set_state_data(
            struct llama_context * ctx,
            uint8_t * src);

    // Save/load session file
    LLAMA_API bool llama_load_session_file(
            struct llama_context * ctx,
            const char * path_session,
            llama_token * tokens_out,
            size_t n_token_capacity,
            size_t * n_token_count_out);

    LLAMA_API bool llama_save_session_file(
            struct llama_context * ctx,
            const char * path_session,
            const llama_token * tokens,
            size_t n_token_count);
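
    // Sketch of an in-memory snapshot/restore using llama_get_state_size(),
    // llama_copy_state_data() and llama_set_state_data() (the session file functions
    // above do the equivalent on disk and additionally store the token history):
    //
    //     const size_t state_size = llama_get_state_size(ctx);
    //     uint8_t * state = (uint8_t *) malloc(state_size);
    //     llama_copy_state_data(ctx, state); // snapshot rng/logits/embedding/kv cache
    //     // ... decode more tokens, try a different continuation, ...
    //     llama_set_state_data(ctx, state);  // roll back to the snapshot
    //     free(state);
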
    //
    // Decoding
    //

    // Run the llama inference to obtain the logits and probabilities for the next token(s).
    // tokens + n_tokens is the provided batch of new tokens to process
    // n_past is the number of tokens to use from previous eval calls
    // Returns 0 on success
    // DEPRECATED: use llama_decode() instead
    LLAMA_API DEPRECATED(int llama_eval(
            struct llama_context * ctx,
            llama_token * tokens,
            int32_t n_tokens,
            int n_past),
            "use llama_decode() instead");

    // Same as llama_eval, but use float matrix input directly.
    // DEPRECATED: use llama_decode() instead
    LLAMA_API DEPRECATED(int llama_eval_embd(
            struct llama_context * ctx,
            float * embd,
            int32_t n_tokens,
            int n_past),
            "use llama_decode() instead");

    // Return batch for single sequence of tokens starting at pos_0
    //
    // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
    //
    LLAMA_API struct llama_batch llama_batch_get_one(
            llama_token * tokens,
            int32_t n_tokens,
            llama_pos pos_0,
            llama_seq_id seq_id);

    // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
    // Each token can be assigned up to n_seq_max sequence ids
    // The batch has to be freed with llama_batch_free()
    // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
    // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
    // The rest of the llama_batch members are allocated with size n_tokens
    // All members are left uninitialized
    LLAMA_API struct llama_batch llama_batch_init(
            int32_t n_tokens,
            int32_t embd,
            int32_t n_seq_max);

    // Frees a batch of tokens allocated with llama_batch_init()
    LLAMA_API void llama_batch_free(struct llama_batch batch);
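
    // Sketch of filling a batch allocated with llama_batch_init() by hand, requesting
    // logits only for the last token (assumes `tokens`/`n_tokens` came from llama_tokenize
    // and `ctx` is an initialized context; 512 is just an example capacity):
    //
    //     struct llama_batch batch = llama_batch_init(512, 0, 1); // up to 512 tokens, no embeddings, 1 seq id per token
    //
    //     batch.n_tokens = n_tokens;
    //     for (int i = 0; i < n_tokens; ++i) {
    //         batch.token   [i]    = tokens[i];
    //         batch.pos     [i]    = i;
    //         batch.n_seq_id[i]    = 1;
    //         batch.seq_id  [i][0] = 0;                   // everything belongs to sequence 0
    //         batch.logits  [i]    = (i == n_tokens - 1); // only need logits for the last token
    //     }
    //
    //     llama_decode(ctx, batch); // see llama_decode() below
    //     float * logits = llama_get_logits_ith(ctx, batch.n_tokens - 1);
    //     // ... sample from logits ...
    //     llama_batch_free(batch);
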
    // Positive return values do not mean a fatal error, but rather a warning.
    //   0 - success
    //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
    // < 0 - error
    LLAMA_API int llama_decode(
            struct llama_context * ctx,
            struct llama_batch batch);

    // Set the number of threads used for decoding
    // n_threads is the number of threads used for generation (single token)
    // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
    LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);

    // Token logits obtained from the last call to llama_eval()
    // The logits for the last token are stored in the last row
    // Logits for which llama_batch.logits[i] == 0 are undefined
    // Rows: n_tokens provided with llama_batch
    // Cols: n_vocab
    LLAMA_API float * llama_get_logits(struct llama_context * ctx);

    // Logits for the ith token. Equivalent to:
    // llama_get_logits(ctx) + i*n_vocab
    LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);

    // Get the embeddings for the input
    // shape: [n_embd] (1-dimensional)
    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);

    //
    // Vocab
    //

    LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);

    LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);

    LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token);

    // Special tokens
    LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
    LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
    LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line

    // Returns -1 if unknown, 1 for true or 0 for false.
    LLAMA_API int llama_add_bos_token(const struct llama_model * model);

    // Returns -1 if unknown, 1 for true or 0 for false.
    LLAMA_API int llama_add_eos_token(const struct llama_model * model);

    // codellama infill tokens
    LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
    LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
    LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
    LLAMA_API llama_token llama_token_eot   (const struct llama_model * model); // End of infill middle

    //
    // Tokenization
    //

    /// @details Convert the provided text into tokens.
    /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
    /// @return Returns the number of tokens on success, no more than n_max_tokens
    /// @return Returns a negative number on failure - the number of tokens that would have been returned
    /// @param special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext.
    ///                Does not insert a leading space.
    LLAMA_API int llama_tokenize(
            const struct llama_model * model,
            const char * text,
            int text_len,
            llama_token * tokens,
            int n_max_tokens,
            bool add_bos,
            bool special);
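
    // Sketch of a typical call: a tokenization should not produce more tokens than input
    // bytes plus one for the optional BOS token, so a buffer of that size should always be
    // large enough (the common helpers in the repository use the same bound); the negative
    // return value is handled as a fallback anyway:
    //
    //     const int n_max_tokens = text_len + 1; // +1 for add_bos
    //     llama_token * tokens = (llama_token *) malloc(n_max_tokens * sizeof(llama_token));
    //     int n_tokens = llama_tokenize(model, text, text_len, tokens, n_max_tokens, true, false);
    //     if (n_tokens < 0) {
    //         // buffer was too small after all: -n_tokens is the number of tokens that were needed
    //     }
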
    // Token Id -> Piece.
    // Uses the vocabulary in the provided model.
    // Does not write null terminator to the buffer.
    // User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens.
    LLAMA_API int llama_token_to_piece(
            const struct llama_model * model,
            llama_token token,
            char * buf,
            int length);
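
    // Sketch of turning a token id back into text: since no null terminator is written,
    // print using the returned length (a negative return is assumed here to indicate the
    // buffer was too small, which is how the common helpers in the repository treat it):
    //
    //     char buf[32];
    //     const int n = llama_token_to_piece(model, token, buf, (int) sizeof(buf));
    //     if (n >= 0) {
    //         printf("%.*s", n, buf);
    //     }
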
    //
    // Grammar
    //

    LLAMA_API struct llama_grammar * llama_grammar_init(
            const llama_grammar_element ** rules,
            size_t n_rules,
            size_t start_rule_index);

    LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);

    LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);

    //
    // Sampling functions
    //

    // Sets the current rng seed.
    LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);

    /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
    /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
    LLAMA_API void llama_sample_repetition_penalties(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            const llama_token * last_tokens,
            size_t penalty_last_n,
            float penalty_repeat,
            float penalty_freq,
            float penalty_present);

    /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted.
    /// @param guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
    /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
    LLAMA_API void llama_sample_classifier_free_guidance(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            struct llama_context * guidance_ctx,
            float scale);

    /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
    LLAMA_API void llama_sample_softmax(
            struct llama_context * ctx,
            llama_token_data_array * candidates);

    /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_k(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            int k,
            size_t min_keep);

    /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_p(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float p,
            size_t min_keep);

    /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
    LLAMA_API void llama_sample_min_p(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float p,
            size_t min_keep);

    /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
    LLAMA_API void llama_sample_tail_free(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float z,
            size_t min_keep);

    /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
    LLAMA_API void llama_sample_typical(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float p,
            size_t min_keep);

    LLAMA_API void llama_sample_temp(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float temp);

    LLAMA_API DEPRECATED(void llama_sample_temperature(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float temp),
            "use llama_sample_temp instead");

    /// @details Apply constraints from grammar
    LLAMA_API void llama_sample_grammar(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            const struct llama_grammar * grammar);

    /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float tau,
            float eta,
            int m,
            float * mu);

    /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat_v2(
            struct llama_context * ctx,
            llama_token_data_array * candidates,
            float tau,
            float eta,
            float * mu);
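
    // Sketch of driving Mirostat 2.0: `mirostat_mu` is state that must persist across
    // sampling calls for the same generation and starts at 2*tau as described above.
    // The tau/eta values are common defaults; `cand_p` is a llama_token_data_array built
    // from the current logits (see the candidate-array sketch further below):
    //
    //     float mirostat_mu = 2.0f * 5.0f; // 2 * tau
    //     ...
    //     llama_token id = llama_sample_token_mirostat_v2(ctx, &cand_p, 5.0f /*tau*/, 0.1f /*eta*/, &mirostat_mu);
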
    /// @details Selects the token with the highest probability.
    ///          Does not compute the token probabilities. Use llama_sample_softmax() instead.
    LLAMA_API llama_token llama_sample_token_greedy(
            struct llama_context * ctx,
            llama_token_data_array * candidates);

    /// @details Randomly selects a token from the candidates based on their probabilities.
    LLAMA_API llama_token llama_sample_token(
            struct llama_context * ctx,
            llama_token_data_array * candidates);
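
    // Sketch of a typical sampling chain built from the functions above: build the
    // candidate array from the logits, apply truncation samplers and temperature, then
    // pick a token. Assumes `ctx`, `model` and a decoded batch; `i_last` is the batch
    // index whose logits were requested; the parameter values are common defaults:
    //
    //     const int n_vocab = llama_n_vocab(model);
    //     const float * logits = llama_get_logits_ith(ctx, i_last);
    //
    //     llama_token_data * cand = (llama_token_data *) malloc(n_vocab * sizeof(llama_token_data));
    //     for (llama_token t = 0; t < n_vocab; ++t) {
    //         cand[t].id = t; cand[t].logit = logits[t]; cand[t].p = 0.0f;
    //     }
    //     llama_token_data_array cand_p = { cand, (size_t) n_vocab, false };
    //
    //     llama_sample_top_k(ctx, &cand_p, 40, 1);
    //     llama_sample_top_p(ctx, &cand_p, 0.95f, 1);
    //     llama_sample_temp (ctx, &cand_p, 0.80f);
    //     llama_token id = llama_sample_token(ctx, &cand_p);
    //     free(cand);
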
    /// @details Accepts the sampled token into the grammar
    LLAMA_API void llama_grammar_accept_token(
            struct llama_context * ctx,
            struct llama_grammar * grammar,
            llama_token token);

    //
    // Beam search
    //

    struct llama_beam_view {
        const llama_token * tokens;

        size_t n_tokens;
        float  p;   // Cumulative beam probability (renormalized relative to all beams)
        bool   eob; // Callback should set this to true when a beam is at end-of-beam.
    };

    // Passed to beam_search_callback function.
    // Whenever 0 < common_prefix_length, this number of tokens should be copied from any of the beams
    // (e.g. beams[0]) as they will be removed (shifted) from all beams in all subsequent callbacks.
    // These pointers are valid only during the synchronous callback, so should not be saved.
    struct llama_beams_state {
        struct llama_beam_view * beam_views;

        size_t n_beams;              // Number of elements in beam_views[].
        size_t common_prefix_length; // Current max length of prefix tokens shared by all beams.
        bool   last_call;            // True iff this is the last callback invocation.
    };

    // Type of pointer to the beam_search_callback function.
    // void* callback_data is any custom data passed to llama_beam_search, that is subsequently
    // passed back to beam_search_callback. This avoids having to use global variables in the callback.
    typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, struct llama_beams_state);

    /// @details Deterministically returns entire sentence constructed by a beam search.
    /// @param ctx Pointer to the llama_context.
    /// @param callback Invoked for each iteration of the beam_search loop, passing in beams_state.
    /// @param callback_data A pointer that is simply passed back to callback.
    /// @param n_beams Number of beams to use.
    /// @param n_past Number of tokens already evaluated.
    /// @param n_predict Maximum number of tokens to predict. EOS may occur earlier.
    LLAMA_API void llama_beam_search(
            struct llama_context * ctx,
            llama_beam_search_callback_fn_t callback,
            void * callback_data,
            size_t n_beams,
            int n_past,
            int n_predict);
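
    // Minimal callback sketch (illustrative only): collect tokens from beam 0 whenever a
    // common prefix is reported, and finish bookkeeping on the last call. `my_output` and
    // `my_data` are hypothetical user-side structures passed via callback_data:
    //
    //     static void my_beam_search_callback(void * callback_data, struct llama_beams_state state) {
    //         // tokens shared by all beams are final - copy them out of beam 0
    //         for (size_t i = 0; i < state.common_prefix_length; ++i) {
    //             llama_token tok = state.beam_views[0].tokens[i];
    //             // ... append tok to the output owned by callback_data ...
    //         }
    //         if (state.last_call) {
    //             // ... finalize the output ...
    //         }
    //     }
    //
    //     llama_beam_search(ctx, my_beam_search_callback, &my_data, 4 /*n_beams*/, n_past, n_predict);
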
    // Performance information
    LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);

    LLAMA_API void llama_print_timings(struct llama_context * ctx);
    LLAMA_API void llama_reset_timings(struct llama_context * ctx);

    // Print system information
    LLAMA_API const char * llama_print_system_info(void);

    // Set callback for all future logging events.
    // If this is not called, or NULL is supplied, everything is output on stderr.
    LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);

    LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef LLAMA_API_INTERNAL

#include <vector>
#include <string>

struct ggml_tensor;

const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
);

#endif // LLAMA_API_INTERNAL

#endif // LLAMA_H