#ifndef LLAMA_H
#define LLAMA_H

#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES
#else
#define LLAMA_MAX_DEVICES 1
#endif // GGML_USE_CUBLAS
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define LLAMA_API __declspec(dllexport)
#        else
#            define LLAMA_API __declspec(dllimport)
#        endif
#    else
#        define LLAMA_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define LLAMA_API
#endif

#ifdef __GNUC__
#    define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define DEPRECATED(func, hint) func
#endif

#define LLAMA_FILE_MAGIC_GGJT        0x67676a74u // 'ggjt'
#define LLAMA_FILE_MAGIC_GGLA        0x67676c61u // 'ggla'
#define LLAMA_FILE_MAGIC_GGMF        0x67676d66u // 'ggmf'
#define LLAMA_FILE_MAGIC_GGML        0x67676d6cu // 'ggml'
#define LLAMA_FILE_MAGIC_GGSN        0x6767736eu // 'ggsn'

#define LLAMA_FILE_VERSION           3
#define LLAMA_FILE_MAGIC             LLAMA_FILE_MAGIC_GGJT
#define LLAMA_FILE_MAGIC_UNVERSIONED LLAMA_FILE_MAGIC_GGML
#define LLAMA_SESSION_MAGIC          LLAMA_FILE_MAGIC_GGSN
#define LLAMA_SESSION_VERSION        1

#define LLAMA_DEFAULT_SEED           0xFFFFFFFF

#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
#define LLAMA_SUPPORTS_GPU_OFFLOAD
#endif

#ifdef __cplusplus
extern "C" {
#endif

    //
    // C interface
    //
    // TODO: show sample usage
    //
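    // Example (illustrative sketch, not a prescribed recipe): a minimal generation setup
    // built only from the declarations below. File name, buffer size and thread count
    // are placeholders; error handling is omitted for brevity.
    //
    //     llama_init_backend(false /* numa */);
    //
    //     struct llama_context_params params = llama_context_default_params();
    //     struct llama_model   * model = llama_load_model_from_file("model.bin", params);
    //     struct llama_context * ctx   = llama_new_context_with_model(model, params);
    //
    //     llama_token tokens[512];
    //     int n_tokens = llama_tokenize(ctx, "Hello", tokens, 512, true /* add_bos */);
    //
    //     llama_eval(ctx, tokens, n_tokens, 0 /* n_past */, 4 /* n_threads */);
    //
    //     // ... pick the next token from llama_get_logits(ctx) using the sampling API below ...
    //
    //     llama_free(ctx);
    //     llama_free_model(model);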
    struct llama_model;
    struct llama_context;

    typedef int llama_token;

    typedef struct llama_token_data {
        llama_token id; // token id
        float logit;    // log-odds of the token
        float p;        // probability of the token
    } llama_token_data;

    typedef struct llama_token_data_array {
        llama_token_data * data;
        size_t size;
        bool sorted;
    } llama_token_data_array;

    typedef void (*llama_progress_callback)(float progress, void *ctx);

    struct llama_context_params {
        uint32_t seed;         // RNG seed, -1 for random
        int32_t  n_ctx;        // text context
        int32_t  n_batch;      // prompt processing batch size
        int32_t  n_gpu_layers; // number of layers to store in VRAM
        int32_t  main_gpu;     // the GPU that is used for scratch and small tensors
        float    tensor_split[LLAMA_MAX_DEVICES]; // how to split layers across multiple GPUs

        // called with a progress value between 0 and 1, pass NULL to disable
        llama_progress_callback progress_callback;
        // context pointer passed to the progress callback
        void * progress_callback_user_data;

        // Keep the booleans together to avoid misalignment during copy-by-value.
        bool low_vram;   // if true, reduce VRAM usage at the cost of performance
        bool f16_kv;     // use fp16 for KV cache
        bool logits_all; // the llama_eval() call computes all logits, not just the last one
        bool vocab_only; // only load the vocabulary, no weights
        bool use_mmap;   // use mmap if possible
        bool use_mlock;  // force system to keep model in RAM
        bool embedding;  // embedding mode only
    };
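    // Example (sketch): start from the defaults and override only the fields you need.
    // The specific values here are placeholders, not recommendations.
    //
    //     struct llama_context_params params = llama_context_default_params();
    //     params.n_ctx        = 2048; // larger text context
    //     params.n_gpu_layers = 32;   // only useful when LLAMA_SUPPORTS_GPU_OFFLOAD is defined
    //     params.use_mmap     = true;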
    // model file types
    enum llama_ftype {
        LLAMA_FTYPE_ALL_F32              = 0,
        LLAMA_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
        // LLAMA_FTYPE_MOSTLY_Q4_2       = 5,  // support has been removed
        // LLAMA_FTYPE_MOSTLY_Q4_3       = 6,  // support has been removed
        LLAMA_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_S        = 11, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_M        = 12, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q3_K_L        = 13, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_K_S        = 14, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q4_K_M        = 15, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_K_S        = 16, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q5_K_M        = 17, // except 1d tensors
        LLAMA_FTYPE_MOSTLY_Q6_K          = 18, // except 1d tensors
    };

    // model quantization parameters
    typedef struct llama_model_quantize_params {
        int nthread;                 // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
        enum llama_ftype ftype;      // quantize to this llama_ftype
        bool allow_requantize;       // allow quantizing non-f32/f16 tensors
        bool quantize_output_tensor; // quantize output.weight
    } llama_model_quantize_params;

    LLAMA_API struct llama_context_params llama_context_default_params();
    LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();

    LLAMA_API bool llama_mmap_supported();
    LLAMA_API bool llama_mlock_supported();

    // TODO: not great API - very likely to change
    // Initialize the llama + ggml backend
    // If numa is true, use NUMA optimizations
    // Call once at the start of the program
    LLAMA_API void llama_init_backend(bool numa);

    LLAMA_API int64_t llama_time_us();

    LLAMA_API struct llama_model * llama_load_model_from_file(
            const char * path_model,
            struct llama_context_params params);

    LLAMA_API void llama_free_model(struct llama_model * model);

    LLAMA_API struct llama_context * llama_new_context_with_model(
            struct llama_model * model,
            struct llama_context_params params);

    // Various functions for loading a ggml llama model.
    // Allocate (almost) all memory needed for the model.
    // Return NULL on failure
    LLAMA_API DEPRECATED(struct llama_context * llama_init_from_file(
            const char * path_model,
            struct llama_context_params params),
            "please use llama_load_model_from_file combined with llama_new_context_with_model instead");

    // Frees all allocated memory
    LLAMA_API void llama_free(struct llama_context * ctx);

    // Returns 0 on success
    LLAMA_API int llama_model_quantize(
            const char * fname_inp,
            const char * fname_out,
            const llama_model_quantize_params * params);
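    // Example (sketch): quantize a model file to Q4_0 starting from the default
    // parameters above. The file names are placeholders.
    //
    //     struct llama_model_quantize_params qparams = llama_model_quantize_default_params();
    //     qparams.ftype   = LLAMA_FTYPE_MOSTLY_Q4_0;
    //     qparams.nthread = 0; // <=0: use all hardware threads
    //     if (llama_model_quantize("model-f16.bin", "model-q4_0.bin", &qparams) != 0) {
    //         // handle error
    //     }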
    // Apply a LoRA adapter to a loaded model
    // path_base_model is the path to a higher quality model to use as a base for
    // the layers modified by the adapter. Can be NULL to use the current loaded model.
    // The model needs to be reloaded before applying a new adapter, otherwise the adapter
    // will be applied on top of the previous one
    // Returns 0 on success
    LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
            struct llama_context * ctx,
            const char * path_lora,
            const char * path_base_model,
            int n_threads),
            "please use llama_model_apply_lora_from_file instead");

    LLAMA_API int llama_model_apply_lora_from_file(
            const struct llama_model * model,
            const char * path_lora,
            const char * path_base_model,
            int n_threads);
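    // Example (sketch): apply a LoRA adapter to a loaded model. The adapter path and
    // thread count are placeholders; NULL for path_base_model means the currently
    // loaded model is used as the base (see the comment above).
    //
    //     if (llama_model_apply_lora_from_file(model, "adapter.bin", NULL, 4) != 0) {
    //         // handle error
    //     }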
    // Returns the number of tokens in the KV cache
    LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);

    // Sets the current rng seed.
    LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);

    // Returns the maximum size in bytes of the state (rng, logits, embedding
    // and kv_cache) - will often be smaller after compacting tokens
    LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);

    // Copies the state to the specified destination address.
    // Destination needs to have allocated enough memory.
    // Returns the number of bytes copied
    LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst);

    // Set the state reading from the specified address
    // Returns the number of bytes read
    LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src);
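    // Example (sketch): snapshot the full context state and later roll back to it.
    // malloc/free are used for illustration; any allocator works.
    //
    //     size_t    state_size = llama_get_state_size(ctx);
    //     uint8_t * state_buf  = (uint8_t *) malloc(state_size);
    //     size_t    n_copied   = llama_copy_state_data(ctx, state_buf); // n_copied <= state_size
    //     // ... evaluate more tokens, then restore the snapshot:
    //     llama_set_state_data(ctx, state_buf);
    //     free(state_buf);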
    // Save/load session file
    LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
    LLAMA_API bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count);

    // Run the llama inference to obtain the logits and probabilities for the next token.
    // tokens + n_tokens is the provided batch of new tokens to process
    // n_past is the number of tokens to use from previous eval calls
    // Returns 0 on success
    LLAMA_API int llama_eval(
            struct llama_context * ctx,
            const llama_token * tokens,
            int n_tokens,
            int n_past,
            int n_threads);
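    // Example (sketch): evaluate a prompt once, then feed newly sampled tokens one at a
    // time, advancing n_past so results from previous eval calls are reused.
    // prompt_tokens, n_prompt and n_threads are placeholder variables.
    //
    //     int n_past = 0;
    //     llama_eval(ctx, prompt_tokens, n_prompt, n_past, n_threads);
    //     n_past += n_prompt;
    //
    //     llama_token next = /* sample from llama_get_logits(ctx) */;
    //     llama_eval(ctx, &next, 1, n_past, n_threads);
    //     n_past += 1;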
    // Same as llama_eval, but use float matrix input directly.
    LLAMA_API int llama_eval_embd(
            struct llama_context * ctx,
            const float * embd,
            int n_tokens,
            int n_past,
            int n_threads);

    // Export a static computation graph for context of 511 and batch size of 1
    // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
    // parameters here to keep things simple
    // IMPORTANT: do not use for anything else other than debugging and testing!
    LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);

    // Convert the provided text into tokens.
    // The tokens pointer must be large enough to hold the resulting tokens.
    // Returns the number of tokens on success, no more than n_max_tokens
    // Returns a negative number on failure - the number of tokens that would have been returned
    // TODO: not sure if correct
    LLAMA_API int llama_tokenize(
            struct llama_context * ctx,
            const char * text,
            llama_token * tokens,
            int n_max_tokens,
            bool add_bos);
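    // Example (sketch): if the buffer turns out to be too small, the negative return
    // value gives the number of tokens that would have been produced, so the call can
    // be retried with a larger buffer. Allocation style is illustrative.
    //
    //     llama_token small[8];
    //     int n = llama_tokenize(ctx, text, small, 8, true);
    //     if (n < 0) {
    //         int needed = -n;
    //         llama_token * big = (llama_token *) malloc(needed * sizeof(llama_token));
    //         n = llama_tokenize(ctx, text, big, needed, true);
    //         // ... use big[0..n-1], then free(big)
    //     }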
    LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
    LLAMA_API int llama_n_ctx  (const struct llama_context * ctx);
    LLAMA_API int llama_n_embd (const struct llama_context * ctx);

    // Get the vocabulary as output parameters.
    // Returns number of results.
    LLAMA_API int llama_get_vocab(
            const struct llama_context * ctx,
            const char * * strings,
            float * scores,
            int capacity);

    // Token logits obtained from the last call to llama_eval()
    // The logits for the last token are stored in the last row
    // Can be mutated in order to change the probabilities of the next token
    // Rows: n_tokens
    // Cols: n_vocab
    LLAMA_API float * llama_get_logits(struct llama_context * ctx);
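    // Example (sketch): turn the logits of the last evaluated token into a
    // llama_token_data_array for the sampling functions below. This assumes the default
    // logits_all == false, so only the last token's row is available; p can be left at
    // zero here, since the samplers derive probabilities from the logits.
    //
    //     int     n_vocab = llama_n_vocab(ctx);
    //     float * logits  = llama_get_logits(ctx);
    //
    //     llama_token_data * cand = (llama_token_data *) malloc(n_vocab * sizeof(llama_token_data));
    //     for (llama_token id = 0; id < n_vocab; id++) {
    //         cand[id].id = id; cand[id].logit = logits[id]; cand[id].p = 0.0f;
    //     }
    //     llama_token_data_array candidates = { cand, (size_t) n_vocab, false };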
    // Get the embeddings for the input
    // shape: [n_embd] (1-dimensional)
    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
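    // Example (sketch): read the embedding vector after an eval call, assuming the
    // context was created with params.embedding = true.
    //
    //     const float * embd   = llama_get_embeddings(ctx);
    //     const int     n_embd = llama_n_embd(ctx);
    //     // embd[0 .. n_embd-1] holds the embedding for the evaluated input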
    // Token Id -> String. Uses the vocabulary in the provided context
    LLAMA_API const char * llama_token_to_str(const struct llama_context * ctx, llama_token token);

    // Special tokens
    LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence
    LLAMA_API llama_token llama_token_eos(); // end-of-sentence
    LLAMA_API llama_token llama_token_nl();  // next-line

    // Sampling functions

    /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
    LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);

    /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
    LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);

    /// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on logits.
    LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);

    /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);

    /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);

    /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
    LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);

    /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
    LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);

    LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);

    /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);

    /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);
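    // Example (sketch): Mirostat keeps its running `mu` in caller-owned storage;
    // initialize it to 2 * tau before the first call and pass the same variable on every
    // subsequent sampling step. The tau/eta values are placeholders.
    //
    //     float tau = 5.0f, eta = 0.1f;
    //     float mu  = 2.0f * tau; // initialized as described above
    //     llama_token tok = llama_sample_token_mirostat_v2(ctx, &candidates, tau, eta, &mu);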
    /// @details Selects the token with the highest probability.
    LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);

    /// @details Randomly selects a token from the candidates based on their probabilities.
    LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);
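    // Example (sketch): one common way to chain the samplers above for a single step.
    // The order and the constants (penalty 1.1, top-k 40, top-p 0.95, temperature 0.8)
    // are illustrative, not prescribed by this API. `candidates` is a
    // llama_token_data_array built from llama_get_logits(); `last_tokens` holds
    // recently generated token ids.
    //
    //     llama_sample_repetition_penalty(ctx, &candidates, last_tokens, n_last, 1.1f);
    //     llama_sample_top_k      (ctx, &candidates, 40,    1);
    //     llama_sample_top_p      (ctx, &candidates, 0.95f, 1);
    //     llama_sample_temperature(ctx, &candidates, 0.80f);
    //     llama_token tok = llama_sample_token(ctx, &candidates);
    //     if (tok == llama_token_eos()) { /* stop generation */ }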
    // Performance information
    LLAMA_API void llama_print_timings(struct llama_context * ctx);
    LLAMA_API void llama_reset_timings(struct llama_context * ctx);

    // Print system information
    LLAMA_API const char * llama_print_system_info(void);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef LLAMA_API_INTERNAL

#include <vector>
#include <string>

struct ggml_tensor;

const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);

#endif

#endif // LLAMA_H