common.h
// Various helper functions and utilities

#pragma once

#include "ggml-opt.h"
#include "llama-cpp.h"

#include <set>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>
#include <map>

#ifdef _WIN32
#define DIRECTORY_SEPARATOR '\\'
#else
#define DIRECTORY_SEPARATOR '/'
#endif // _WIN32

#define die(msg)          do { fputs("error: " msg "\n", stderr);                exit(1); } while (0)
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)
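// Example (hypothetical sketch, not part of this header): aborting with a
// formatted error message via the macros above; `path` is an assumed std::string:
//
//     FILE * f = fopen(path.c_str(), "rb");
//     if (f == NULL) {
//         die_fmt("failed to open '%s'", path.c_str());
//     }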
#define print_build_info() do {                                                                  \
    fprintf(stderr, "%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);        \
    fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); \
} while(0)

#define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"

struct common_time_meas {
    common_time_meas(int64_t & t_acc, bool disable = false);
    ~common_time_meas();

    const int64_t t_start_us;

    int64_t & t_acc;
};
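// Example (hypothetical sketch): judging by its members, common_time_meas is an
// RAII timer - the constructor records a start time and the destructor adds the
// elapsed microseconds to the referenced accumulator:
//
//     int64_t t_work_us = 0;
//     {
//         common_time_meas tm(t_work_us);
//         do_work(); // hypothetical workload
//     }              // elapsed time is accumulated into t_work_us here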
struct common_adapter_lora_info {
    std::string path;
    float scale;

    std::string task_name;
    std::string prompt_prefix;

    struct llama_adapter_lora * ptr;
};

using llama_tokens = std::vector<llama_token>;

// build info
extern int LLAMA_BUILD_NUMBER;
extern const char * LLAMA_COMMIT;
extern const char * LLAMA_COMPILER;
extern const char * LLAMA_BUILD_TARGET;

struct common_control_vector_load_info;

//
// CPU utils
//

struct cpu_params {
    int      n_threads = -1;
    bool     cpumask[GGML_MAX_N_THREADS] = {false}; // CPU affinity mask.
    bool     mask_valid = false;                    // Default: any CPU
    enum ggml_sched_priority priority = GGML_SCHED_PRIO_NORMAL; // Scheduling priority: (0 - normal, 1 - medium, 2 - high, 3 - realtime)
    bool     strict_cpu = false;                    // Use strict CPU placement
    uint32_t poll = 50;                             // Polling (busywait) level (0 - no polling, 100 - mostly polling)
};

int32_t cpu_get_num_physical_cores();
int32_t cpu_get_num_math();

//
// Common params
//

enum llama_example {
    LLAMA_EXAMPLE_COMMON,
    LLAMA_EXAMPLE_SPECULATIVE,
    LLAMA_EXAMPLE_MAIN,
    LLAMA_EXAMPLE_EMBEDDING,
    LLAMA_EXAMPLE_PERPLEXITY,
    LLAMA_EXAMPLE_RETRIEVAL,
    LLAMA_EXAMPLE_PASSKEY,
    LLAMA_EXAMPLE_IMATRIX,
    LLAMA_EXAMPLE_BENCH,
    LLAMA_EXAMPLE_SERVER,
    LLAMA_EXAMPLE_CVECTOR_GENERATOR,
    LLAMA_EXAMPLE_EXPORT_LORA,
    LLAMA_EXAMPLE_MTMD,
    LLAMA_EXAMPLE_LOOKUP,
    LLAMA_EXAMPLE_PARALLEL,
    LLAMA_EXAMPLE_TTS,
    LLAMA_EXAMPLE_DIFFUSION,
    LLAMA_EXAMPLE_FINETUNE,

    LLAMA_EXAMPLE_COUNT,
};

enum common_sampler_type {
    COMMON_SAMPLER_TYPE_NONE        = 0,
    COMMON_SAMPLER_TYPE_DRY         = 1,
    COMMON_SAMPLER_TYPE_TOP_K       = 2,
    COMMON_SAMPLER_TYPE_TOP_P       = 3,
    COMMON_SAMPLER_TYPE_MIN_P       = 4,
  //COMMON_SAMPLER_TYPE_TFS_Z       = 5,
    COMMON_SAMPLER_TYPE_TYPICAL_P   = 6,
    COMMON_SAMPLER_TYPE_TEMPERATURE = 7,
    COMMON_SAMPLER_TYPE_XTC         = 8,
    COMMON_SAMPLER_TYPE_INFILL      = 9,
    COMMON_SAMPLER_TYPE_PENALTIES   = 10,
    COMMON_SAMPLER_TYPE_TOP_N_SIGMA = 11,
};

// dimensionality reduction methods, used by cvector-generator
enum dimre_method {
    DIMRE_METHOD_PCA,
    DIMRE_METHOD_MEAN,
};

enum common_conversation_mode {
    COMMON_CONVERSATION_MODE_DISABLED = 0,
    COMMON_CONVERSATION_MODE_ENABLED  = 1,
    COMMON_CONVERSATION_MODE_AUTO     = 2,
};

enum common_grammar_trigger_type {
    COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN,
    COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
};

struct common_grammar_trigger {
    common_grammar_trigger_type type;
    std::string value;
    llama_token token = LLAMA_TOKEN_NULL;
};

// sampling parameters
struct common_params_sampling {
    uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampler

    int32_t n_prev             = 64;     // number of previous tokens to remember
    int32_t n_probs            = 0;      // if greater than 0, output the probabilities of top n_probs tokens.
    int32_t min_keep           = 0;      // 0 = disabled, otherwise samplers should return at least min_keep tokens
    int32_t top_k              = 40;     // <= 0 to use vocab size
    float   top_p              = 0.95f;  // 1.0 = disabled
    float   min_p              = 0.05f;  // 0.0 = disabled
    float   xtc_probability    = 0.00f;  // 0.0 = disabled
    float   xtc_threshold      = 0.10f;  // > 0.5 disables XTC
    float   typ_p              = 1.00f;  // typical_p, 1.0 = disabled
    float   temp               = 0.80f;  // <= 0.0 to sample greedily, 0.0 to not output probabilities
    float   dynatemp_range     = 0.00f;  // 0.0 = disabled
    float   dynatemp_exponent  = 1.00f;  // controls how entropy maps to temperature in dynamic temperature sampler
    int32_t penalty_last_n     = 64;     // last n tokens to penalize (0 = disable penalty, -1 = context size)
    float   penalty_repeat     = 1.00f;  // 1.0 = disabled
    float   penalty_freq       = 0.00f;  // 0.0 = disabled
    float   penalty_present    = 0.00f;  // 0.0 = disabled
    float   dry_multiplier     = 0.0f;   // 0.0 = disabled; DRY repetition penalty for tokens extending repetition:
    float   dry_base           = 1.75f;  // 0.0 = disabled; multiplier * base ^ (length of sequence before token - allowed length)
    int32_t dry_allowed_length = 2;      // tokens extending repetitions beyond this receive penalty
    int32_t dry_penalty_last_n = -1;     // how many tokens to scan for repetitions (0 = disable penalty, -1 = context size)
    int32_t mirostat           = 0;      // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
    float   top_n_sigma        = -1.00f; // -1.0 = disabled
    float   mirostat_tau       = 5.00f;  // target entropy
    float   mirostat_eta       = 0.10f;  // learning rate

    bool ignore_eos       = false;
    bool no_perf          = false; // disable performance metrics
    bool timing_per_token = false;

    std::vector<std::string> dry_sequence_breakers = {"\n", ":", "\"", "*"}; // default sequence breakers for DRY

    std::vector<enum common_sampler_type> samplers = {
        COMMON_SAMPLER_TYPE_PENALTIES,
        COMMON_SAMPLER_TYPE_DRY,
        COMMON_SAMPLER_TYPE_TOP_N_SIGMA,
        COMMON_SAMPLER_TYPE_TOP_K,
        COMMON_SAMPLER_TYPE_TYPICAL_P,
        COMMON_SAMPLER_TYPE_TOP_P,
        COMMON_SAMPLER_TYPE_MIN_P,
        COMMON_SAMPLER_TYPE_XTC,
        COMMON_SAMPLER_TYPE_TEMPERATURE,
    };

    std::string grammar; // optional BNF-like grammar to constrain sampling
    bool grammar_lazy = false;
    std::vector<common_grammar_trigger> grammar_triggers; // optional triggers (for lazy grammars)
    std::set<llama_token> preserved_tokens;

    std::vector<llama_logit_bias> logit_bias;     // logit biases to apply
    std::vector<llama_logit_bias> logit_bias_eog; // pre-calculated logit biases for EOG tokens

    // print the parameters into a string
    std::string print() const;
};
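// Example (hypothetical sketch): overriding a few sampling defaults and
// restricting the sampler chain; the values are illustrative only:
//
//     common_params_sampling sparams;
//     sparams.temp     = 0.2f; // closer to greedy decoding
//     sparams.top_k    = 20;
//     sparams.samplers = { COMMON_SAMPLER_TYPE_TOP_K, COMMON_SAMPLER_TYPE_TEMPERATURE };
//     fprintf(stderr, "%s\n", sparams.print().c_str());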
struct common_params_model {
    std::string path        = ""; // model local path      // NOLINT
    std::string url         = ""; // model url to download // NOLINT
    std::string hf_repo     = ""; // HF repo               // NOLINT
    std::string hf_file     = ""; // HF file               // NOLINT
    std::string docker_repo = ""; // Docker repo           // NOLINT
};

struct common_params_speculative {
    std::vector<ggml_backend_dev_t> devices; // devices to use for offloading

    int32_t n_ctx        = 0;     // draft context size
    int32_t n_max        = 16;    // maximum number of tokens to draft during speculative decoding
    int32_t n_min        = 0;     // minimum number of draft tokens to use for speculative decoding
    int32_t n_gpu_layers = -1;    // number of layers to store in VRAM for the draft model (-1 - use default)
    float   p_split      = 0.1f;  // speculative decoding split probability
    float   p_min        = 0.75f; // minimum speculative decoding probability (greedy)

    std::vector<std::pair<std::string, std::string>> replacements; // main to speculative model replacements
    std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;

    ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
    ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V

    struct cpu_params cpuparams;
    struct cpu_params cpuparams_batch;

    struct common_params_model model;
};

struct common_params_vocoder {
    struct common_params_model model;

    std::string speaker_file = ""; // speaker file path // NOLINT

    bool use_guide_tokens = false; // enable guide tokens to improve TTS accuracy // NOLINT
};

struct common_params_diffusion {
    int32_t steps            = 128;
    bool    visual_mode      = false;
    float   eps              = 0;     // epsilon for timesteps
    int32_t block_length     = 0;     // block length for generation
    int32_t algorithm        = 4;     // default algorithm: low-confidence
    float   alg_temp         = 0.0f;  // algorithm temperature
    float   cfg_scale        = 0;     // classifier-free guidance scale
    bool    add_gumbel_noise = false; // add gumbel noise to the logits if temp > 0.0
};

// reasoning API response format (not to be confused with the chat template's reasoning format)
enum common_reasoning_format {
    COMMON_REASONING_FORMAT_NONE,
    COMMON_REASONING_FORMAT_AUTO,            // Same as deepseek, using `message.reasoning_content`
    COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY, // Extract thinking tag contents and return as `message.reasoning_content`, or leave inline in <think> tags in stream mode
    COMMON_REASONING_FORMAT_DEEPSEEK,        // Extract thinking tag contents and return as `message.reasoning_content`, including in streaming deltas.
    // do not extend this enum unless you absolutely have to
    // in most cases, use COMMON_REASONING_FORMAT_AUTO
    // see: https://github.com/ggml-org/llama.cpp/pull/15408
};

struct lr_opt {
    float    lr0          = 1e-5; // learning rate at first epoch
    float    lr_min       = -1;
    float    decay_epochs = -1;   // if > 0, the learning rate starts at lr0 and decays to lr_min after this many epochs
    float    scale_epoch  = 0;
    float    wd           = 0;
    unsigned epochs       = 2;

    unsigned epoch; // set by the optimizer's outer (epochs) loop

    // learning rate decay - constant LR per epoch only for now
    float get_lr(float e) const;
    float get_lr() const { return get_lr(epoch); }

    // must be called after argument parsing, before get_lr
    void init();
};
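// Example (hypothetical sketch): a typical decay configuration; init() is
// required before the first get_lr() call, per the comment above:
//
//     lr_opt lr;
//     lr.lr0          = 1e-4f;
//     lr.lr_min       = 1e-5f;
//     lr.decay_epochs = 10;
//     lr.init();
//     float cur = lr.get_lr(3.0f); // LR between lr0 and lr_min at epoch 3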
struct ggml_opt_optimizer_params common_opt_lr_pars(void * userdata);

struct common_params {
    int32_t n_predict   = -1;   // new tokens to predict
    int32_t n_ctx       = 4096; // context size
    int32_t n_batch     = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_ubatch    = 512;  // physical batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_keep      = 0;    // number of tokens to keep from initial prompt
    int32_t n_chunks    = -1;   // max number of chunks to process (-1 = unlimited)
    int32_t n_parallel  = 1;    // number of parallel sequences to decode
    int32_t n_sequences = 1;    // number of sequences to decode
    int32_t grp_attn_n  = 1;    // group-attention factor
    int32_t grp_attn_w  = 512;  // group-attention width
    int32_t n_print     = -1;   // print token count every n tokens (-1 = disabled)

    float   rope_freq_base   = 0.0f;  // RoPE base frequency
    float   rope_freq_scale  = 0.0f;  // RoPE frequency scaling factor
    float   yarn_ext_factor  = -1.0f; // YaRN extrapolation mix factor
    float   yarn_attn_factor = -1.0f; // YaRN magnitude scaling factor
    float   yarn_beta_fast   = -1.0f; // YaRN low correction dim
    float   yarn_beta_slow   = -1.0f; // YaRN high correction dim
    int32_t yarn_orig_ctx    = 0;     // YaRN original context length

    // offload params
    std::vector<ggml_backend_dev_t> devices; // devices to use for offloading

    int32_t n_gpu_layers      = -1;  // number of layers to store in VRAM (-1 - use default)
    int32_t main_gpu          = 0;   // the GPU that is used for scratch and small tensors
    float   tensor_split[128] = {0}; // how split tensors should be distributed across GPUs

    enum llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs

    struct cpu_params cpuparams;
    struct cpu_params cpuparams_batch;

    ggml_backend_sched_eval_callback cb_eval = nullptr;
    void * cb_eval_user_data                 = nullptr;

    ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED;

    enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
    enum llama_pooling_type      pooling_type      = LLAMA_POOLING_TYPE_UNSPECIFIED;   // pooling type for embeddings
    enum llama_attention_type    attention_type    = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings
    enum llama_flash_attn_type   flash_attn_type   = LLAMA_FLASH_ATTN_TYPE_AUTO;       // whether to use Flash Attention

    struct common_params_sampling    sampling;
    struct common_params_speculative speculative;
    struct common_params_vocoder     vocoder;
    struct common_params_diffusion   diffusion;

    struct common_params_model model;

    std::string model_alias          = ""; // model alias                                           // NOLINT
    std::string hf_token             = ""; // HF token                                              // NOLINT
    std::string prompt               = "";                                                          // NOLINT
    std::string system_prompt        = "";                                                          // NOLINT
    std::string prompt_file          = ""; // store the external prompt file name                   // NOLINT
    std::string path_prompt_cache    = ""; // path to file for saving/loading prompt eval state     // NOLINT
    std::string input_prefix         = ""; // string to prefix user inputs with                     // NOLINT
    std::string input_suffix         = ""; // string to suffix user inputs with                     // NOLINT
    std::string lookup_cache_static  = ""; // path of static ngram cache file for lookup decoding   // NOLINT
    std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding  // NOLINT
    std::string logits_file          = ""; // file for saving *all* logits                          // NOLINT

    std::vector<std::string> in_files;   // all input files
    std::vector<std::string> antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts)
    std::vector<llama_model_kv_override> kv_overrides;
    std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;

    bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_adapter_lora_apply)
    std::vector<common_adapter_lora_info> lora_adapters; // lora adapter path with user defined scale

    std::vector<common_control_vector_load_info> control_vectors; // control vector with user defined scale

    int32_t verbosity                  = 0;
    int32_t control_vector_layer_start = -1; // layer range for control vector
    int32_t control_vector_layer_end   = -1; // layer range for control vector
    bool    offline                    = false;

    int32_t ppl_stride      = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
    int32_t ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
                                 //   (which is more convenient to use for plotting)

    bool   hellaswag       = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
    size_t hellaswag_tasks = 400;   // number of tasks to use when computing the HellaSwag score

    bool   winogrande       = false; // compute Winogrande score over random tasks from datafile supplied in prompt
    size_t winogrande_tasks = 0;     // number of tasks to use when computing the Winogrande score. If 0, all tasks will be computed

    bool   multiple_choice       = false; // compute TruthfulQA score over random tasks from datafile supplied in prompt
    size_t multiple_choice_tasks = 0;     // number of tasks to use when computing the TruthfulQA score. If 0, all tasks will be computed

    bool kl_divergence = false; // compute KL divergence

    bool usage             = false; // print usage
    bool completion        = false; // print source-able completion script
    bool use_color         = false; // use color to distinguish generations and inputs
    bool special           = false; // enable special token output
    bool interactive       = false; // interactive mode
    bool interactive_first = false; // wait for user input immediately
    bool prompt_cache_all  = false; // save user input and generations to prompt cache
    bool prompt_cache_ro   = false; // open the prompt cache read-only and do not update it

    bool escape          = true;  // escape "\n", "\r", "\t", "\'", "\"", and "\\"
    bool multiline_input = false; // reverse the usage of `\`
    bool simple_io       = false; // improves compatibility with subprocesses and limited consoles
    bool cont_batching   = true;  // insert new sequences for decoding on-the-fly
    bool no_perf         = false; // disable performance metrics
    bool ctx_shift       = false; // context shift on infinite text generation
    bool swa_full        = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
    bool kv_unified      = false; // enable unified KV cache

    bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
    bool use_mmap         = true;  // use mmap for faster loads
    bool use_mlock        = false; // use mlock to keep model in memory
    bool verbose_prompt   = false; // print prompt tokens before generation
    bool display_prompt   = true;  // print prompt before generation
    bool no_kv_offload    = false; // disable KV offloading
    bool warmup           = true;  // warmup run
    bool check_tensors    = false; // validate tensor data
    bool no_op_offload    = false; // globally disable offload host tensor operations to device
    bool no_extra_bufts   = false; // disable extra buffer types (used for weight repacking)
    bool no_host          = false; // bypass host buffer allowing extra buffers to be used
    bool single_turn      = false; // single turn chat conversation

    ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
    ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V

    common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;

    // multimodal models (see tools/mtmd)
    struct common_params_model mmproj;
    bool mmproj_use_gpu = true;     // use GPU for multimodal model
    bool no_mmproj      = false;    // explicitly disable multimodal model
    std::vector<std::string> image; // path to image file(s)
    int image_min_tokens = -1;
    int image_max_tokens = -1;

    // finetune
    struct lr_opt lr;
    enum ggml_opt_optimizer_type optimizer = GGML_OPT_OPTIMIZER_TYPE_ADAMW;
    float val_split = 0.05f; // fraction of the data used for the validation set

    // embedding
    bool        embedding      = false; // get only sentence embedding
    int32_t     embd_normalize = 2;     // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
    std::string embd_out       = "";    // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix
    std::string embd_sep       = "\n";  // separator of embeddings
    std::string cls_sep        = "\t";  // separator of classification sequences

    // server params
    int32_t port              = 8080;         // server listens on this network port
    int32_t timeout_read      = 600;          // http read timeout in seconds
    int32_t timeout_write     = timeout_read; // http write timeout in seconds
    int32_t n_threads_http    = -1;           // number of threads to process HTTP requests (TODO: support threadpool)
    int32_t n_cache_reuse     = 0;            // min chunk size to reuse from the cache via KV shifting
    int32_t n_ctx_checkpoints = 8;            // max number of context checkpoints per slot
    int32_t cache_ram_mib     = 8192;         // -1 = no limit, 0 = disable, 1 = 1 MiB, etc.

    std::string hostname      = "127.0.0.1";
    std::string public_path   = ""; // NOLINT
    std::string api_prefix    = ""; // NOLINT
    std::string chat_template = ""; // NOLINT
    bool use_jinja            = false; // NOLINT
    bool enable_chat_template = true;
    common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
    int reasoning_budget = -1;
    bool prefill_assistant = true; // if true, any trailing assistant message will be prefilled into the response

    std::vector<std::string> api_keys;

    std::string ssl_file_key  = ""; // NOLINT
    std::string ssl_file_cert = ""; // NOLINT

    std::map<std::string, std::string> default_template_kwargs;

    // "advanced" endpoints are disabled by default for better security
    bool webui            = true;
    bool endpoint_slots   = true;
    bool endpoint_props   = false; // only control POST requests, not GET
    bool endpoint_metrics = false;

    bool log_json = false;

    std::string slot_save_path;

    float slot_prompt_similarity = 0.1f;

    // batched-bench params
    bool is_pp_shared   = false;
    bool is_tg_separate = false;

    std::vector<int32_t> n_pp;
    std::vector<int32_t> n_tg;
    std::vector<int32_t> n_pl;

    // retrieval params
    std::vector<std::string> context_files; // context files to embed

    int32_t chunk_size = 64; // chunk size for context embedding

    std::string chunk_separator = "\n"; // chunk separator for context embedding

    // passkey params
    int32_t n_junk = 250; // number of times to repeat the junk text
    int32_t i_pos  = -1;  // position of the passkey in the junk text

    // imatrix params
    int32_t n_out_freq  = 10; // output the imatrix every n_out_freq iterations
    int32_t n_save_freq = 0;  // save the imatrix every n_save_freq iterations
    int32_t i_chunk     = 0;  // start processing from this chunk
    int8_t  imat_dat    = 0;  // whether the legacy imatrix.dat format should be output (gguf <= 0 < dat)

    bool process_output  = false; // collect data for the output tensor
    bool compute_ppl     = true;  // whether to compute perplexity
    bool show_statistics = false; // show imatrix statistics per tensor
    bool parse_special   = false; // whether to parse special tokens during imatrix tokenization

    // cvector-generator params
    int n_pca_batch      = 100;
    int n_pca_iterations = 1000;
    dimre_method cvector_dimre_method  = DIMRE_METHOD_PCA;
    std::string  cvector_positive_file = "tools/cvector-generator/positive.txt";
    std::string  cvector_negative_file = "tools/cvector-generator/negative.txt";

    bool spm_infill = false; // suffix/prefix/middle pattern for infill

    // batched-bench params
    bool batched_bench_output_jsonl = false;

    // common params
    std::string out_file; // output filename for all example programs

    // optional callback for model loading progress and cancellation:
    // called with a progress value between 0.0 and 1.0.
    // return false from callback to abort model loading or true to continue
    llama_progress_callback load_progress_callback = NULL;
    void * load_progress_callback_user_data        = NULL;

    bool has_speculative() const {
        return !speculative.model.path.empty() || !speculative.model.hf_repo.empty();
    }
};

// call once at the start of a program if it uses libcommon
// initializes the logging system and prints info about the build
void common_init();

std::string common_params_get_system_info(const common_params & params);

bool parse_cpu_range(const std::string & range, bool (&boolmask)[GGML_MAX_N_THREADS]);
bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREADS]);
void postprocess_cpu_params(cpu_params & cpuparams, const cpu_params * role_model = nullptr);
bool set_process_priority(enum ggml_sched_priority prio);
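// Example (hypothetical sketch): pinning threads via the mask helpers; the
// string formats shown (a hex mask and a "lo-hi" range) match the usual CLI
// usage but are an assumption here:
//
//     cpu_params cp;
//     if (parse_cpu_mask("0xff", cp.cpumask)) { // CPUs 0-7
//         cp.mask_valid = true;
//     }
//     postprocess_cpu_params(cp);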
//
// String utils
//

#ifdef __GNUC__
# if defined(__MINGW32__) && !defined(__clang__)
#  define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
# else
#  define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
# endif
#else
# define LLAMA_COMMON_ATTRIBUTE_FORMAT(...)
#endif

LLAMA_COMMON_ATTRIBUTE_FORMAT(1, 2)
std::string string_format(const char * fmt, ...);

std::string string_strip(const std::string & str);
std::string string_get_sortable_timestamp();
std::string string_join(const std::vector<std::string> & values, const std::string & separator);
std::vector<std::string> string_split(const std::string & str, const std::string & delimiter);
std::string string_repeat(const std::string & str, size_t n);

void string_replace_all(std::string & s, const std::string & search, const std::string & replace);

std::string regex_escape(const std::string & s);

template<class T>
static std::vector<T> string_split(const std::string & str, char delim) {
    static_assert(!std::is_same<T, std::string>::value, "Please use the specialized version for std::string");
    std::vector<T> values;
    std::istringstream str_stream(str);
    std::string token;
    while (std::getline(str_stream, token, delim)) {
        T value;
        std::istringstream token_stream(token);
        token_stream >> value;
        values.push_back(value);
    }
    return values;
}
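// Example (hypothetical sketch): splitting delimited values into typed vectors;
// the std::string overload below is the specialization the static_assert points to:
//
//     std::vector<int>         ids   = string_split<int>("1,2,3", ',');         // {1, 2, 3}
//     std::vector<std::string> parts = string_split<std::string>("a:b:c", ':'); // {"a", "b", "c"}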
template<>
std::vector<std::string> string_split<std::string>(const std::string & input, char separator)
{
    std::vector<std::string> parts;
    size_t begin_pos = 0;
    size_t separator_pos = input.find(separator);
    while (separator_pos != std::string::npos) {
        std::string part = input.substr(begin_pos, separator_pos - begin_pos);
        parts.emplace_back(part);
        begin_pos = separator_pos + 1;
        separator_pos = input.find(separator, begin_pos);
    }
    parts.emplace_back(input.substr(begin_pos, separator_pos - begin_pos));
    return parts;
}

// While we wait for C++20's std::string::starts_with...
static bool string_starts_with(const std::string & str, const std::string & prefix) {
    return str.rfind(prefix, 0) == 0;
}

// While we wait for C++20's std::string::ends_with...
bool string_ends_with(const std::string_view & str, const std::string_view & suffix);
bool string_remove_suffix(std::string & str, const std::string_view & suffix);
size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop);

bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
void string_process_escapes(std::string & input);

std::string string_from(bool value);
std::string string_from(const std::vector<int> & values);
std::string string_from(const struct llama_context * ctx, const std::vector<llama_token> & tokens);
std::string string_from(const struct llama_context * ctx, const struct llama_batch & batch);

//
// Filesystem utils
//

bool fs_validate_filename(const std::string & filename);
bool fs_create_directory_with_parents(const std::string & path);

std::string fs_get_cache_directory();
std::string fs_get_cache_file(const std::string & filename);

struct common_file_info {
    std::string path;
    std::string name;
    size_t size = 0; // in bytes
};

std::vector<common_file_info> fs_list_files(const std::string & path);

//
// Model utils
//

// note: defines object's lifetime
struct common_init_result {
    llama_model_ptr   model;
    llama_context_ptr context;

    std::vector<llama_adapter_lora_ptr> lora;
};

struct common_init_result common_init_from_params(common_params & params);
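// Example (hypothetical sketch): the common entry point used by most examples;
// the model path and context size are illustrative values:
//
//     common_params params;
//     params.model.path = DEFAULT_MODEL_PATH;
//     params.n_ctx      = 8192;
//
//     common_init_result llama_init = common_init_from_params(params);
//     llama_model   * model = llama_init.model.get();
//     llama_context * lctx  = llama_init.context.get();
//     if (model == nullptr || lctx == nullptr) {
//         die("failed to load the model");
//     }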
struct llama_model_params     common_model_params_to_llama  (      common_params & params);
struct llama_context_params   common_context_params_to_llama(const common_params & params);
struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params);

// clear LoRA adapters from context, then apply new list of adapters
void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);

std::string get_model_endpoint();

//
// Batch utils
//

void common_batch_clear(struct llama_batch & batch);

void common_batch_add(
        struct llama_batch & batch,
        llama_token id,
        llama_pos pos,
        const std::vector<llama_seq_id> & seq_ids,
        bool logits);
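// Example (hypothetical sketch): queueing a prompt into a batch, requesting
// logits only for the final token, then decoding; `prompt_tokens` and `ctx`
// are assumed to exist:
//
//     llama_batch batch = llama_batch_init(512, 0, 1);
//     common_batch_clear(batch);
//     for (size_t i = 0; i < prompt_tokens.size(); ++i) {
//         const bool is_last = (i == prompt_tokens.size() - 1);
//         common_batch_add(batch, prompt_tokens[i], (llama_pos) i, { 0 }, is_last);
//     }
//     llama_decode(ctx, batch);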
//
// Token utils
//

// longest common prefix
size_t common_lcp(const llama_tokens & a, const llama_tokens & b);

// longest common subsequence
size_t common_lcs(const llama_tokens & a, const llama_tokens & b);
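// Example (hypothetical sketch): both helpers return a length, e.g.:
//
//     llama_tokens a = { 1, 2, 3, 4 };
//     llama_tokens b = { 1, 2, 9, 3 };
//     size_t n_lcp = common_lcp(a, b); // 2 -> shared prefix {1, 2}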
//
// Vocab utils
//

// tokenizes a string into a vector of tokens
// should work similar to Python's `tokenizer.encode`
std::vector<llama_token> common_tokenize(
        const struct llama_context * ctx,
        const std::string & text,
        bool add_special,
        bool parse_special = false);

std::vector<llama_token> common_tokenize(
        const struct llama_vocab * vocab,
        const std::string & text,
        bool add_special,
        bool parse_special = false);

// converts a token into a piece, optionally rendering special/control tokens
// should work similar to Python's `tokenizer.id_to_piece`
std::string common_token_to_piece(
        const struct llama_context * ctx,
        llama_token token,
        bool special = true);

std::string common_token_to_piece(
        const struct llama_vocab * vocab,
        llama_token token,
        bool special = true);

// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
// optionally renders special/control tokens
std::string common_detokenize(
        const struct llama_context * ctx,
        const std::vector<llama_token> & tokens,
        bool special = true);

std::string common_detokenize(
        const struct llama_vocab * vocab,
        const std::vector<llama_token> & tokens,
        bool special = true);
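// Example (hypothetical sketch): a tokenize/detokenize round trip; `ctx` is an
// assumed llama_context pointer:
//
//     std::vector<llama_token> toks = common_tokenize(ctx, "Hello world", /*add_special=*/true);
//     std::string text = common_detokenize(ctx, toks, /*special=*/false);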
//
// Embedding utils
//

// TODO: replace embd_norm with an enum
void common_embd_normalize(const float * inp, float * out, int n, int embd_norm);

float common_embd_similarity_cos(const float * embd1, const float * embd2, int n);
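// Example (hypothetical sketch): euclidean-normalizing two raw embeddings and
// comparing them; `a_raw`, `b_raw`, and `n_embd` are assumed, and embd_norm == 2
// follows the convention documented for common_params::embd_normalize above:
//
//     std::vector<float> a_norm(n_embd), b_norm(n_embd);
//     common_embd_normalize(a_raw.data(), a_norm.data(), n_embd, 2);
//     common_embd_normalize(b_raw.data(), b_norm.data(), n_embd, 2);
//     float sim = common_embd_similarity_cos(a_norm.data(), b_norm.data(), n_embd);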
//
// Control vector utils
//

struct common_control_vector_data {
    int n_embd;

    // stores data for layers [1, n_layer] where n_layer = data.size() / n_embd
    std::vector<float> data;
};

struct common_control_vector_load_info {
    float strength;

    std::string fname;
};

// Load control vectors, scale each by strength, and add them together.
// On error, returns {-1, empty}
common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);

//
// Split utils
//

namespace {

const char * const LLM_KV_SPLIT_NO            = "split.no";
const char * const LLM_KV_SPLIT_COUNT         = "split.count";
const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";

}

//
// MoE utils
//

const char * const LLM_FFN_EXPS_REGEX = "\\.ffn_(up|down|gate)_(ch|)exps";

static std::string llm_ffn_exps_block_regex(int idx) {
    return string_format("blk\\.%d%s", idx, LLM_FFN_EXPS_REGEX);
}

static llama_model_tensor_buft_override llm_ffn_exps_cpu_override() {
    return { LLM_FFN_EXPS_REGEX, ggml_backend_cpu_buffer_type() };
}
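// Example (hypothetical sketch): keeping MoE expert tensors in host memory by
// adding a buffer-type override to the params; whether a terminating entry is
// required depends on how the overrides are consumed downstream:
//
//     common_params params;
//     params.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());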
//
// training utils
//

ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride);

// "adamw" or "sgd" (case-insensitive)
enum ggml_opt_optimizer_type common_opt_get_optimizer(const char *);