common.h

// Various helper functions and utilities

#pragma once

#include "llama.h"
#include "sampling.h"

#define LOG_NO_FILE_LINE_FUNCTION
#include "log.h"

#include <cmath>
#include <string>
#include <vector>
#include <random>
#include <thread>
#include <unordered_map>
#include <tuple>

#ifdef _WIN32
#define DIRECTORY_SEPARATOR '\\'
#else
#define DIRECTORY_SEPARATOR '/'
#endif // _WIN32

#define die(msg)          do { fputs("error: " msg "\n", stderr);                exit(1); } while (0)
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)

#define print_build_info() do {                                                                  \
    fprintf(stderr, "%s: build = %d (%s)\n",      __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);   \
    fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); \
} while(0)
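
// Example (illustrative sketch, not part of this header; `path` is a
// hypothetical std::string): die_fmt() aborts with a formatted error message,
// print_build_info() reports the build constants declared below.
//
//   FILE * f = fopen(path.c_str(), "rb");
//   if (f == NULL) {
//       die_fmt("failed to open file '%s'", path.c_str());
//   }
//   print_build_info();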

// build info
extern int LLAMA_BUILD_NUMBER;
extern char const *LLAMA_COMMIT;
extern char const *LLAMA_COMPILER;
extern char const *LLAMA_BUILD_TARGET;

//
// CLI argument parsing
//
int32_t get_num_physical_cores();

struct gpt_params {
    uint32_t seed                           = -1;    // RNG seed

    int32_t n_threads                       = get_num_physical_cores();
    int32_t n_threads_batch                 = -1;    // number of threads to use for batch processing (-1 = use n_threads)
    int32_t n_predict                       = -1;    // new tokens to predict
    int32_t n_ctx                           = 512;   // context size
    int32_t n_batch                         = 512;   // batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_keep                          = 0;     // number of tokens to keep from initial prompt
    int32_t n_draft                         = 16;    // number of tokens to draft during speculative decoding
    int32_t n_chunks                        = -1;    // max number of chunks to process (-1 = unlimited)
    int32_t n_parallel                      = 1;     // number of parallel sequences to decode
    int32_t n_sequences                     = 1;     // number of sequences to decode
    float   p_accept                        = 0.5f;  // speculative decoding accept probability
    float   p_split                         = 0.1f;  // speculative decoding split probability
    int32_t n_gpu_layers                    = -1;    // number of layers to store in VRAM (-1 - use default)
    int32_t n_gpu_layers_draft              = -1;    // number of layers to store in VRAM for the draft model (-1 - use default)
    int32_t main_gpu                        = 0;     // the GPU that is used for scratch and small tensors
    float   tensor_split[LLAMA_MAX_DEVICES] = {0};   // how split tensors should be distributed across GPUs
    int32_t n_beams                         = 0;     // if non-zero then use beam search of given width.
    float   rope_freq_base                  = 0.0f;  // RoPE base frequency
    float   rope_freq_scale                 = 0.0f;  // RoPE frequency scaling factor
    float   yarn_ext_factor                 = -1.0f; // YaRN extrapolation mix factor
    float   yarn_attn_factor                = 1.0f;  // YaRN magnitude scaling factor
    float   yarn_beta_fast                  = 32.0f; // YaRN low correction dim
    float   yarn_beta_slow                  = 1.0f;  // YaRN high correction dim
    int32_t yarn_orig_ctx                   = 0;     // YaRN original context length
    int8_t  rope_scaling_type               = LLAMA_ROPE_SCALING_UNSPECIFIED; // TODO: better to be int32_t for alignment
                                                                              // pinging @cebtenzzre

    // sampling parameters
    struct llama_sampling_params sparams;

    std::string model             = "models/7B/ggml-model-f16.gguf"; // model path
    std::string model_draft       = "";        // draft model for speculative decoding
    std::string model_alias       = "unknown"; // model alias
    std::string prompt            = "";
    std::string prompt_file      = "";         // store the external prompt file name
    std::string path_prompt_cache = "";        // path to file for saving/loading prompt eval state
    std::string input_prefix      = "";        // string to prefix user inputs with
    std::string input_suffix      = "";        // string to suffix user inputs with
    std::vector<std::string> antiprompt;       // string upon seeing which more user input is prompted
    std::string logdir            = "";        // directory in which to save YAML log files

    // TODO: avoid tuple, use struct
    std::vector<std::tuple<std::string, float>> lora_adapter; // lora adapter path with user defined scale
    std::string lora_base = "";                               // base model path for the lora adapter

    int ppl_stride      = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
    int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
                             //                                       (which is more convenient to use for plotting)
                             //
    bool   hellaswag       = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
    size_t hellaswag_tasks = 400;   // number of tasks to use when computing the HellaSwag score

    bool mul_mat_q         = true;  // if true, use mul_mat_q kernels instead of cuBLAS
    bool memory_f16        = true;  // use f16 instead of f32 for memory kv
    bool random_prompt     = false; // do not randomize prompt if none provided
    bool use_color         = false; // use color to distinguish generations and inputs
    bool interactive       = false; // interactive mode
    bool chatml            = false; // chatml mode (used for models trained on chatml syntax)
    bool prompt_cache_all  = false; // save user input and generations to prompt cache
    bool prompt_cache_ro   = false; // open the prompt cache read-only and do not update it
    bool embedding         = false; // get only sentence embedding
    bool escape            = false; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
    bool interactive_first = false; // wait for user input immediately
    bool multiline_input   = false; // reverse the usage of `\`
    bool simple_io         = false; // improves compatibility with subprocesses and limited consoles
    bool cont_batching     = false; // insert new sequences for decoding on-the-fly
    bool input_prefix_bos  = false; // prefix BOS to user inputs, preceding input_prefix
    bool ignore_eos        = false; // ignore generated EOS tokens
    bool instruct          = false; // instruction mode (used for Alpaca models)
    bool logits_all        = false; // return logits for all tokens in the batch
    bool use_mmap          = true;  // use mmap for faster loads
    bool use_mlock         = false; // use mlock to keep model in memory
    bool numa              = false; // attempt optimizations that help on some NUMA systems
    bool verbose_prompt    = false; // print prompt tokens before generation
    bool infill            = false; // use infill mode

    // multimodal models (see examples/llava)
    std::string mmproj = ""; // path to multimodal projector
    std::string image  = ""; // path to an image file
};

bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params);

bool gpt_params_parse(int argc, char ** argv, gpt_params & params);

void gpt_print_usage(int argc, char ** argv, const gpt_params & params);

std::string get_system_info(const gpt_params & params);

std::string gpt_random_prompt(std::mt19937 & rng);

void process_escapes(std::string & input);
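
// Example (illustrative sketch, not part of this header; a typical tool's
// main() fills gpt_params from the command line like this):
//
//   int main(int argc, char ** argv) {
//       gpt_params params;
//       if (!gpt_params_parse(argc, argv, params)) {
//           return 1; // parsing failed
//       }
//       if (params.escape) {
//           process_escapes(params.prompt);
//       }
//       fprintf(stderr, "%s\n", get_system_info(params).c_str());
//       // ...
//   }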

//
// Model utils
//

// TODO: avoid tuple, use struct
std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params);

struct llama_model_params   llama_model_params_from_gpt_params  (const gpt_params & params);
struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
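
// Example (illustrative sketch, not part of this header; assumes the
// llama_backend_init()/llama_free()/llama_free_model() calls from llama.h,
// whose exact signatures may differ by version):
//
//   llama_backend_init(params.numa);
//
//   llama_model   * model;
//   llama_context * ctx;
//   std::tie(model, ctx) = llama_init_from_gpt_params(params);
//   if (model == nullptr) {
//       die("unable to load model");
//   }
//
//   // ... generate ...
//
//   llama_free(ctx);
//   llama_free_model(model);
//   llama_backend_free();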

// Batch utils

void llama_batch_clear(struct llama_batch & batch);

void llama_batch_add(
        struct llama_batch & batch,
        llama_token id,
        llama_pos pos,
        const std::vector<llama_seq_id> & seq_ids,
        bool logits);
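
// Example (illustrative sketch, not part of this header; `batch` is assumed to
// have been created with llama_batch_init() and `tokens` to hold a tokenized
// prompt; llama_decode() comes from llama.h):
//
//   llama_batch_clear(batch);
//   for (size_t i = 0; i < tokens.size(); i++) {
//       // sequence 0; request logits only for the last token
//       llama_batch_add(batch, tokens[i], (llama_pos) i, { 0 }, i == tokens.size() - 1);
//   }
//   if (llama_decode(ctx, batch) != 0) {
//       die("llama_decode failed");
//   }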

//
// Vocab utils
//

// tokenizes a string into a vector of tokens
// should work similar to Python's `tokenizer.encode`
std::vector<llama_token> llama_tokenize(
        const struct llama_context * ctx,
        const std::string & text,
        bool add_bos,
        bool special = false);

std::vector<llama_token> llama_tokenize(
        const struct llama_model * model,
        const std::string & text,
        bool add_bos,
        bool special = false);
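
// Example (illustrative sketch, not part of this header; add_bos is hardcoded
// here, see llama_should_add_bos_token() below for a metadata-driven choice):
//
//   std::vector<llama_token> tokens = llama_tokenize(ctx, params.prompt, true);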

// converts a token into a piece of text
// should work similar to Python's `tokenizer.id_to_piece`
std::string llama_token_to_piece(
        const struct llama_context * ctx,
        llama_token token);
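
// Example (illustrative sketch, not part of this header; prints the text piece
// for each token in a hypothetical `tokens` vector):
//
//   for (llama_token tok : tokens) {
//       printf("%s", llama_token_to_piece(ctx, tok).c_str());
//   }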

// TODO: these should be moved into the llama.h C-style API under a single `llama_detokenize`
// function that takes into account the tokenizer type and decides how to handle the leading space
//
// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
// removes the leading space from the first non-BOS token
std::string llama_detokenize_spm(
        llama_context * ctx,
        const std::vector<llama_token> & tokens);

// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
std::string llama_detokenize_bpe(
        llama_context * ctx,
        const std::vector<llama_token> & tokens);
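
// Example (illustrative sketch, not part of this header; which variant to call
// depends on the model's tokenizer type, e.g. as reported by llama_vocab_type()
// from llama.h):
//
//   std::string text = llama_detokenize_spm(ctx, tokens); // SPM-tokenizer models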

// Uses the value from the model metadata if possible, otherwise
// defaults to true when model type is SPM, otherwise false.
bool llama_should_add_bos_token(const llama_model * model);
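
// Example (illustrative sketch, not part of this header; assumes
// llama_get_model() from llama.h to obtain the model from a context):
//
//   const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
//   std::vector<llama_token> tokens = llama_tokenize(ctx, params.prompt, add_bos);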

//
// YAML utils
//

bool create_directory_with_parents(const std::string & path);
void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);

std::string get_sortable_timestamp();

void dump_non_result_info_yaml(
        FILE * stream, const gpt_params & params, const llama_context * lctx,
        const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
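
// Example (illustrative sketch, not part of this header; writes a run log under
// params.logdir, assuming llama_model_desc() from llama.h and a hypothetical
// `tokens` vector holding the tokenized prompt):
//
//   if (!params.logdir.empty() && create_directory_with_parents(params.logdir)) {
//       const std::string timestamp = get_sortable_timestamp();
//       const std::string path      = params.logdir + DIRECTORY_SEPARATOR + timestamp + ".yml";
//       FILE * f = fopen(path.c_str(), "w");
//       if (f != NULL) {
//           char model_desc[128];
//           llama_model_desc(model, model_desc, sizeof(model_desc));
//           dump_non_result_info_yaml(f, params, ctx, timestamp,
//                   std::vector<int>(tokens.begin(), tokens.end()), model_desc);
//           fclose(f);
//       }
//   }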