// Various helper functions and utilities

#pragma once

#include "llama.h"

#include "sampling.h"

#define LOG_NO_FILE_LINE_FUNCTION
#include "log.h"

#include <cmath>
#include <string>
#include <vector>
#include <random>
#include <thread>
#include <unordered_map>
#include <tuple>

#ifdef _WIN32
#define DIRECTORY_SEPARATOR '\\'
#else
#define DIRECTORY_SEPARATOR '/'
#endif // _WIN32

#define die(msg)          do { fputs("error: " msg "\n", stderr);                exit(1); } while (0)
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)

#define print_build_info() do {                                                                  \
    fprintf(stderr, "%s: build = %d (%s)\n",      __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);   \
    fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); \
} while(0)

#define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"

// build info
extern int LLAMA_BUILD_NUMBER;
extern char const *LLAMA_COMMIT;
extern char const *LLAMA_COMPILER;
extern char const *LLAMA_BUILD_TARGET;

struct llama_control_vector_load_info;

int get_math_cpu_count();
int32_t get_num_physical_cores();

//
// CLI argument parsing
//
struct gpt_params {
    uint32_t seed = LLAMA_DEFAULT_SEED; // RNG seed

    int32_t n_threads = get_math_cpu_count();
    int32_t n_threads_draft = -1;
    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
    int32_t n_threads_batch_draft = -1;
    int32_t n_predict = -1; // new tokens to predict
    int32_t n_ctx = 512; // context size
    int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_ubatch = 512; // physical batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_keep = 0; // number of tokens to keep from initial prompt
    int32_t n_draft = 5; // number of tokens to draft during speculative decoding
    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
    int32_t n_parallel = 1; // number of parallel sequences to decode
    int32_t n_sequences = 1; // number of sequences to decode
    float p_split = 0.1f; // speculative decoding split probability

    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
    int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
    llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs
    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
    float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs

    int32_t n_beams = 0; // if non-zero then use beam search of given width.
    int32_t grp_attn_n = 1; // group-attention factor
    int32_t grp_attn_w = 512; // group-attention width
    int32_t n_print = -1; // print token count every n tokens (-1 = disabled)

    float rope_freq_base = 0.0f; // RoPE base frequency
    float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
    float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
    float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor
    float yarn_beta_fast = 32.0f; // YaRN low correction dim
    float yarn_beta_slow = 1.0f; // YaRN high correction dim
    int32_t yarn_orig_ctx = 0; // YaRN original context length
    float defrag_thold = -1.0f; // KV cache defragmentation threshold

    ggml_backend_sched_eval_callback cb_eval = nullptr;
    void * cb_eval_user_data = nullptr;

    ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED;

    enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
    enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings
    // sampling parameters
    struct llama_sampling_params sparams;

    std::string model = ""; // model path
    std::string model_draft = ""; // draft model for speculative decoding
    std::string model_alias = "unknown"; // model alias
    std::string model_url = ""; // model url to download
    std::string hf_repo = ""; // HF repo
    std::string hf_file = ""; // HF file
    std::string prompt = "";
    std::string prompt_file = ""; // store the external prompt file name
    std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
    std::string input_prefix = ""; // string to prefix user inputs with
    std::string input_suffix = ""; // string to suffix user inputs with
    std::vector<std::string> antiprompt; // strings upon seeing which more user input is prompted
    std::string logdir = ""; // directory in which to save YAML log files
    std::string lookup_cache_static = ""; // path of static ngram cache file for lookup decoding
    std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding
    std::string logits_file = ""; // file for saving *all* logits

    std::vector<llama_model_kv_override> kv_overrides;

    // TODO: avoid tuple, use struct
    std::vector<std::tuple<std::string, float>> lora_adapter; // lora adapter path with user defined scale
    std::string lora_base = ""; // base model path for the lora adapter

    std::vector<llama_control_vector_load_info> control_vectors; // control vector with user defined scale
    int32_t control_vector_layer_start = -1; // layer range for control vector
    int32_t control_vector_layer_end = -1; // layer range for control vector

    int ppl_stride = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
    int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
                             // (which is more convenient to use for plotting)

    bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
    size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score

    bool winogrande = false; // compute Winogrande score over random tasks from datafile supplied in prompt
    size_t winogrande_tasks = 0; // number of tasks to use when computing the Winogrande score. If 0, all tasks will be computed

    bool multiple_choice = false; // compute TruthfulQA score over random tasks from datafile supplied in prompt
    size_t multiple_choice_tasks = 0; // number of tasks to use when computing the TruthfulQA score. If 0, all tasks will be computed

    bool kl_divergence = false; // compute KL divergence
    bool random_prompt = false; // randomize prompt if none provided
    bool use_color = false; // use color to distinguish generations and inputs
    bool interactive = false; // interactive mode
    bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
    bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
    bool chatml = false; // chatml mode (used for models trained on chatml syntax)
    bool prompt_cache_all = false; // save user input and generations to prompt cache
    bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
    bool embedding = false; // get only sentence embedding
    bool escape = false; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
    bool interactive_first = false; // wait for user input immediately
    bool multiline_input = false; // reverse the usage of `\`
    bool simple_io = false; // improves compatibility with subprocesses and limited consoles
    bool cont_batching = true; // insert new sequences for decoding on-the-fly
    bool flash_attn = false; // flash attention
    bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
    bool ignore_eos = false; // ignore generated EOS tokens
    bool instruct = false; // instruction mode (used for Alpaca models)
    bool logits_all = false; // return logits for all tokens in the batch
    bool use_mmap = true; // use mmap for faster loads
    bool use_mlock = false; // use mlock to keep model in memory
    bool verbose_prompt = false; // print prompt tokens before generation
    bool display_prompt = true; // print prompt before generation
    bool infill = false; // use infill mode
    bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes
    bool no_kv_offload = false; // disable KV offloading
    bool warmup = true; // warmup run
    bool check_tensors = false; // validate tensor data

    std::string cache_type_k = "f16"; // KV cache data type for the K
    std::string cache_type_v = "f16"; // KV cache data type for the V

    // multimodal models (see examples/llava)
    std::string mmproj = ""; // path to multimodal projector
    std::vector<std::string> image; // path to image file(s)
};

void gpt_params_handle_model_default(gpt_params & params);

bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);

bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params);
bool gpt_params_parse(int argc, char ** argv, gpt_params & params);

void gpt_print_usage(int argc, char ** argv, const gpt_params & params);

bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param);

std::string get_system_info(const gpt_params & params);

std::string gpt_random_prompt(std::mt19937 & rng);

void process_escapes(std::string & input);

bool validate_file_name(const std::string & filename);
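
// Typical entry-point usage of the parser above (a minimal sketch, not part of this header's API;
// error handling beyond the boolean return is omitted):
//
//   int main(int argc, char ** argv) {
//       gpt_params params;
//       if (!gpt_params_parse(argc, argv, params)) {
//           return 1;
//       }
//       // ... use params.model, params.prompt, params.n_ctx, ...
//   }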

//
// String utils
//

std::vector<llama_sampler_type> sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names);
std::vector<llama_sampler_type> sampler_types_from_chars(const std::string & names_string);
std::vector<std::string> string_split(std::string input, char separator);
std::string string_strip(const std::string & str);
std::string sampler_type_to_name_string(llama_sampler_type sampler_type);

//
// Model utils
//

// TODO: avoid tuple, use struct
std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params);

struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & params);
struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);

struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const struct llama_model_params & params);
struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const struct llama_model_params & params);
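
// A minimal sketch of how the tuple-returning initializer is typically consumed (illustrative only;
// error handling omitted, model path uses the DEFAULT_MODEL_PATH defined above):
//
//   gpt_params params;
//   params.model = DEFAULT_MODEL_PATH;
//
//   llama_model *   model = nullptr;
//   llama_context * ctx   = nullptr;
//   std::tie(model, ctx) = llama_init_from_gpt_params(params);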

//
// Batch utils
//

void llama_batch_clear(struct llama_batch & batch);

void llama_batch_add(
    struct llama_batch & batch,
    llama_token id,
    llama_pos pos,
    const std::vector<llama_seq_id> & seq_ids,
    bool logits);
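
// A minimal sketch of filling a batch with one prompt sequence (illustrative only; assumes a batch
// created with llama_batch_init and a tokens vector produced by llama_tokenize below):
//
//   llama_batch_clear(batch);
//   for (size_t i = 0; i < tokens.size(); ++i) {
//       llama_batch_add(batch, tokens[i], (llama_pos) i, { 0 }, false);
//   }
//   // request logits only for the last prompt token
//   batch.logits[batch.n_tokens - 1] = true;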

//
// Vocab utils
//

// tokenizes a string into a vector of tokens
// should work similar to Python's `tokenizer.encode`
std::vector<llama_token> llama_tokenize(
    const struct llama_context * ctx,
    const std::string & text,
    bool add_special,
    bool parse_special = false);

std::vector<llama_token> llama_tokenize(
    const struct llama_model * model,
    const std::string & text,
    bool add_special,
    bool parse_special = false);

// converts a token into a piece, optionally renders special/control tokens
// should work similar to Python's `tokenizer.id_to_piece`
std::string llama_token_to_piece(
    const struct llama_context * ctx,
    llama_token token,
    bool special = true);
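
// A minimal round-trip sketch using the helpers above (illustrative only; assumes an initialized
// llama_context * ctx):
//
//   std::vector<llama_token> tokens = llama_tokenize(ctx, "Hello world", /*add_special=*/true);
//   for (llama_token tok : tokens) {
//       printf("%6d -> '%s'\n", tok, llama_token_to_piece(ctx, tok).c_str());
//   }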

// TODO: these should be moved into the llama.h C-style API under a single `llama_detokenize` function
//       that takes into account the tokenizer type and decides how to handle the leading space
//
// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
// removes the leading space from the first non-BOS token
std::string llama_detokenize_spm(
    llama_context * ctx,
    const std::vector<llama_token> & tokens);

// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
std::string llama_detokenize_bpe(
    llama_context * ctx,
    const std::vector<llama_token> & tokens);

// Uses the value from the model metadata if possible, otherwise
// defaults to true when model type is SPM, otherwise false.
bool llama_should_add_bos_token(const llama_model * model);

//
// YAML utils
//

bool create_directory_with_parents(const std::string & path);
void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);

std::string get_sortable_timestamp();

void dump_non_result_info_yaml(
    FILE * stream, const gpt_params & params, const llama_context * lctx,
    const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);

//
// KV cache utils
//

// Dump the KV cache view with the number of sequences per cell.
void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80);

// Dump the KV cache view showing individual sequences in each cell (long output).
void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);

//
// Embedding utils
//

void llama_embd_normalize(const float * inp, float * out, int n);

float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n);
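
// For reference, the standard cosine similarity these helpers compute (assuming llama_embd_normalize
// performs the usual L2 normalization):
//
//   sim(a, b) = (sum_i a_i * b_i) / (sqrt(sum_i a_i^2) * sqrt(sum_i b_i^2))
//
// so two embeddings that have already been normalized compare via a plain dot product.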

//
// Control vector utils
//

struct llama_control_vector_data {
    int n_embd;

    // stores data for layers [1, n_layer] where n_layer = data.size() / n_embd
    std::vector<float> data;
};

struct llama_control_vector_load_info {
    float strength;

    std::string fname;
};

// Load control vectors, scale each by strength, and add them together.
// On error, returns {-1, empty}
llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos);
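
// A minimal sketch of loading and combining control vectors (illustrative only; the .gguf file
// names are hypothetical placeholders):
//
//   std::vector<llama_control_vector_load_info> infos = {
//       {  0.8f, "direction-a.gguf" },
//       { -0.4f, "direction-b.gguf" },
//   };
//   llama_control_vector_data cvec = llama_control_vector_load(infos);
//   if (cvec.n_embd == -1) {
//       // loading failed
//   }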

//
// Split utils
//

static const char * const LLM_KV_SPLIT_NO = "split.no";
static const char * const LLM_KV_SPLIT_COUNT = "split.count";
static const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";