1
0

arg.cpp 81 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989
  1. #include "arg.h"
  2. #include "sampling.h"
  3. #include <algorithm>
  4. #include <string>
  5. #include <vector>
  6. #include <set>
  7. #include <fstream>
  8. #include <regex>
  9. #include <cstdarg>
  10. #include <climits>
  11. #include "json-schema-to-grammar.h"
  12. using json = nlohmann::ordered_json;
  13. llama_arg & llama_arg::set_examples(std::initializer_list<enum llama_example> examples) {
  14. this->examples = std::move(examples);
  15. return *this;
  16. }
  17. llama_arg & llama_arg::set_env(const char * env) {
  18. help = help + "\n(env: " + env + ")";
  19. this->env = env;
  20. return *this;
  21. }
  22. llama_arg & llama_arg::set_sparam() {
  23. is_sparam = true;
  24. return *this;
  25. }
  26. bool llama_arg::in_example(enum llama_example ex) {
  27. return examples.find(ex) != examples.end();
  28. }
  29. bool llama_arg::get_value_from_env(std::string & output) {
  30. if (env == nullptr) return false;
  31. char * value = std::getenv(env);
  32. if (value) {
  33. output = value;
  34. return true;
  35. }
  36. return false;
  37. }
  38. bool llama_arg::has_value_from_env() {
  39. return env != nullptr && std::getenv(env);
  40. }
  41. static std::vector<std::string> break_str_into_lines(std::string input, size_t max_char_per_line) {
  42. std::vector<std::string> result;
  43. std::istringstream iss(input);
  44. std::string line;
  45. auto add_line = [&](const std::string& l) {
  46. if (l.length() <= max_char_per_line) {
  47. result.push_back(l);
  48. } else {
  49. std::istringstream line_stream(l);
  50. std::string word, current_line;
  51. while (line_stream >> word) {
  52. if (current_line.length() + !current_line.empty() + word.length() > max_char_per_line) {
  53. if (!current_line.empty()) result.push_back(current_line);
  54. current_line = word;
  55. } else {
  56. current_line += (!current_line.empty() ? " " : "") + word;
  57. }
  58. }
  59. if (!current_line.empty()) result.push_back(current_line);
  60. }
  61. };
  62. while (std::getline(iss, line)) {
  63. add_line(line);
  64. }
  65. return result;
  66. }
  67. std::string llama_arg::to_string() {
  68. // params for printing to console
  69. const static int n_leading_spaces = 40;
  70. const static int n_char_per_line_help = 70; // TODO: detect this based on current console
  71. std::string leading_spaces(n_leading_spaces, ' ');
  72. std::ostringstream ss;
  73. for (const auto arg : args) {
  74. if (arg == args.front()) {
  75. if (args.size() == 1) {
  76. ss << arg;
  77. } else {
  78. // first arg is usually abbreviation, we need padding to make it more beautiful
  79. auto tmp = std::string(arg) + ", ";
  80. auto spaces = std::string(std::max(0, 7 - (int)tmp.size()), ' ');
  81. ss << tmp << spaces;
  82. }
  83. } else {
  84. ss << arg << (arg != args.back() ? ", " : "");
  85. }
  86. }
  87. if (value_hint) ss << " " << value_hint;
  88. if (value_hint_2) ss << " " << value_hint_2;
  89. if (ss.tellp() > n_leading_spaces - 3) {
  90. // current line is too long, add new line
  91. ss << "\n" << leading_spaces;
  92. } else {
  93. // padding between arg and help, same line
  94. ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
  95. }
  96. const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
  97. for (const auto & line : help_lines) {
  98. ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
  99. }
  100. return ss.str();
  101. }
  102. //
  103. // utils
  104. //
  105. #ifdef __GNUC__
  106. #ifdef __MINGW32__
  107. #define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
  108. #else
  109. #define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
  110. #endif
  111. #else
  112. #define LLAMA_COMMON_ATTRIBUTE_FORMAT(...)
  113. #endif
  114. LLAMA_COMMON_ATTRIBUTE_FORMAT(1, 2)
  115. static std::string format(const char * fmt, ...) {
  116. va_list ap;
  117. va_list ap2;
  118. va_start(ap, fmt);
  119. va_copy(ap2, ap);
  120. int size = vsnprintf(NULL, 0, fmt, ap);
  121. GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
  122. std::vector<char> buf(size + 1);
  123. int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
  124. GGML_ASSERT(size2 == size);
  125. va_end(ap2);
  126. va_end(ap);
  127. return std::string(buf.data(), size);
  128. }
  129. static void gpt_params_handle_model_default(gpt_params & params) {
  130. if (!params.hf_repo.empty()) {
  131. // short-hand to avoid specifying --hf-file -> default it to --model
  132. if (params.hf_file.empty()) {
  133. if (params.model.empty()) {
  134. throw std::invalid_argument("error: --hf-repo requires either --hf-file or --model\n");
  135. }
  136. params.hf_file = params.model;
  137. } else if (params.model.empty()) {
  138. params.model = fs_get_cache_file(string_split(params.hf_file, '/').back());
  139. }
  140. } else if (!params.model_url.empty()) {
  141. if (params.model.empty()) {
  142. auto f = string_split(params.model_url, '#').front();
  143. f = string_split(f, '?').front();
  144. params.model = fs_get_cache_file(string_split(f, '/').back());
  145. }
  146. } else if (params.model.empty()) {
  147. params.model = DEFAULT_MODEL_PATH;
  148. }
  149. }
  150. //
  151. // CLI argument parsing functions
  152. //
  153. static bool gpt_params_parse_ex(int argc, char ** argv, gpt_params_context & ctx_arg) {
  154. std::string arg;
  155. const std::string arg_prefix = "--";
  156. gpt_params & params = ctx_arg.params;
  157. std::unordered_map<std::string, llama_arg *> arg_to_options;
  158. for (auto & opt : ctx_arg.options) {
  159. for (const auto & arg : opt.args) {
  160. arg_to_options[arg] = &opt;
  161. }
  162. }
  163. // handle environment variables
  164. for (auto & opt : ctx_arg.options) {
  165. std::string value;
  166. if (opt.get_value_from_env(value)) {
  167. try {
  168. if (opt.handler_void && (value == "1" || value == "true")) {
  169. opt.handler_void(params);
  170. }
  171. if (opt.handler_int) {
  172. opt.handler_int(params, std::stoi(value));
  173. }
  174. if (opt.handler_string) {
  175. opt.handler_string(params, value);
  176. continue;
  177. }
  178. } catch (std::exception & e) {
  179. throw std::invalid_argument(format(
  180. "error while handling environment variable \"%s\": %s\n\n", opt.env, e.what()));
  181. }
  182. }
  183. }
  184. // handle command line arguments
  185. auto check_arg = [&](int i) {
  186. if (i+1 >= argc) {
  187. throw std::invalid_argument("expected value for argument");
  188. }
  189. };
  190. for (int i = 1; i < argc; i++) {
  191. const std::string arg_prefix = "--";
  192. std::string arg = argv[i];
  193. if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
  194. std::replace(arg.begin(), arg.end(), '_', '-');
  195. }
  196. if (arg_to_options.find(arg) == arg_to_options.end()) {
  197. throw std::invalid_argument(format("error: invalid argument: %s", arg.c_str()));
  198. }
  199. auto opt = *arg_to_options[arg];
  200. if (opt.has_value_from_env()) {
  201. fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
  202. }
  203. try {
  204. if (opt.handler_void) {
  205. opt.handler_void(params);
  206. continue;
  207. }
  208. // arg with single value
  209. check_arg(i);
  210. std::string val = argv[++i];
  211. if (opt.handler_int) {
  212. opt.handler_int(params, std::stoi(val));
  213. continue;
  214. }
  215. if (opt.handler_string) {
  216. opt.handler_string(params, val);
  217. continue;
  218. }
  219. // arg with 2 values
  220. check_arg(i);
  221. std::string val2 = argv[++i];
  222. if (opt.handler_str_str) {
  223. opt.handler_str_str(params, val, val2);
  224. continue;
  225. }
  226. } catch (std::exception & e) {
  227. throw std::invalid_argument(format(
  228. "error while handling argument \"%s\": %s\n\n"
  229. "usage:\n%s\n\nto show complete usage, run with -h",
  230. arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
  231. }
  232. }
  233. postprocess_cpu_params(params.cpuparams, nullptr);
  234. postprocess_cpu_params(params.cpuparams_batch, &params.cpuparams);
  235. postprocess_cpu_params(params.draft_cpuparams, &params.cpuparams);
  236. postprocess_cpu_params(params.draft_cpuparams_batch, &params.cpuparams_batch);
  237. if (params.prompt_cache_all && (params.interactive || params.interactive_first)) {
  238. throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
  239. }
  240. gpt_params_handle_model_default(params);
  241. if (params.escape) {
  242. string_process_escapes(params.prompt);
  243. string_process_escapes(params.input_prefix);
  244. string_process_escapes(params.input_suffix);
  245. for (auto & antiprompt : params.antiprompt) {
  246. string_process_escapes(antiprompt);
  247. }
  248. }
  249. if (!params.kv_overrides.empty()) {
  250. params.kv_overrides.emplace_back();
  251. params.kv_overrides.back().key[0] = 0;
  252. }
  253. return true;
  254. }
  255. static void gpt_params_print_usage(gpt_params_context & ctx_arg) {
  256. auto print_options = [](std::vector<llama_arg *> & options) {
  257. for (llama_arg * opt : options) {
  258. printf("%s", opt->to_string().c_str());
  259. }
  260. };
  261. std::vector<llama_arg *> common_options;
  262. std::vector<llama_arg *> sparam_options;
  263. std::vector<llama_arg *> specific_options;
  264. for (auto & opt : ctx_arg.options) {
  265. // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching current example
  266. if (opt.is_sparam) {
  267. sparam_options.push_back(&opt);
  268. } else if (opt.in_example(ctx_arg.ex)) {
  269. specific_options.push_back(&opt);
  270. } else {
  271. common_options.push_back(&opt);
  272. }
  273. }
  274. printf("----- common params -----\n\n");
  275. print_options(common_options);
  276. printf("\n\n----- sampling params -----\n\n");
  277. print_options(sparam_options);
  278. // TODO: maybe convert enum llama_example to string
  279. printf("\n\n----- example-specific params -----\n\n");
  280. print_options(specific_options);
  281. }
  282. bool gpt_params_parse(int argc, char ** argv, gpt_params & params, llama_example ex, void(*print_usage)(int, char **)) {
  283. auto ctx_arg = gpt_params_parser_init(params, ex, print_usage);
  284. const gpt_params params_org = ctx_arg.params; // the example can modify the default params
  285. try {
  286. if (!gpt_params_parse_ex(argc, argv, ctx_arg)) {
  287. ctx_arg.params = params_org;
  288. return false;
  289. }
  290. if (ctx_arg.params.usage) {
  291. gpt_params_print_usage(ctx_arg);
  292. if (ctx_arg.print_usage) {
  293. ctx_arg.print_usage(argc, argv);
  294. }
  295. exit(0);
  296. }
  297. } catch (const std::invalid_argument & ex) {
  298. fprintf(stderr, "%s\n", ex.what());
  299. ctx_arg.params = params_org;
  300. return false;
  301. }
  302. return true;
  303. }
  304. gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex, void(*print_usage)(int, char **)) {
  305. gpt_params_context ctx_arg(params);
  306. ctx_arg.print_usage = print_usage;
  307. ctx_arg.ex = ex;
  308. std::string sampler_type_chars;
  309. std::string sampler_type_names;
  310. for (const auto & sampler : params.sparams.samplers) {
  311. sampler_type_chars += gpt_sampler_type_to_chr(sampler);
  312. sampler_type_names += gpt_sampler_type_to_str(sampler) + ";";
  313. }
  314. sampler_type_names.pop_back();
  315. /**
  316. * filter options by example
  317. * rules:
  318. * - all examples inherit options from LLAMA_EXAMPLE_COMMON
  319. * - if LLAMA_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example
  320. * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
  321. */
  322. auto add_opt = [&](llama_arg arg) {
  323. if (arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) {
  324. ctx_arg.options.push_back(std::move(arg));
  325. }
  326. };
  327. add_opt(llama_arg(
  328. {"-h", "--help", "--usage"},
  329. "print usage and exit",
  330. [](gpt_params & params) {
  331. params.usage = true;
  332. }
  333. ));
  334. add_opt(llama_arg(
  335. {"--version"},
  336. "show version and build info",
  337. [](gpt_params &) {
  338. fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
  339. fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
  340. exit(0);
  341. }
  342. ));
  343. add_opt(llama_arg(
  344. {"-v", "--verbose"},
  345. "print verbose information",
  346. [](gpt_params & params) {
  347. params.verbosity = 1;
  348. }
  349. ));
  350. add_opt(llama_arg(
  351. {"--verbosity"}, "N",
  352. format("set specific verbosity level (default: %d)", params.verbosity),
  353. [](gpt_params & params, int value) {
  354. params.verbosity = value;
  355. }
  356. ));
  357. add_opt(llama_arg(
  358. {"--verbose-prompt"},
  359. format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),
  360. [](gpt_params & params) {
  361. params.verbose_prompt = true;
  362. }
  363. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  364. add_opt(llama_arg(
  365. {"--no-display-prompt"},
  366. format("don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false"),
  367. [](gpt_params & params) {
  368. params.display_prompt = false;
  369. }
  370. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  371. add_opt(llama_arg(
  372. {"-co", "--color"},
  373. format("colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? "true" : "false"),
  374. [](gpt_params & params) {
  375. params.use_color = true;
  376. }
  377. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
  378. add_opt(llama_arg(
  379. {"-t", "--threads"}, "N",
  380. format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
  381. [](gpt_params & params, int value) {
  382. params.cpuparams.n_threads = value;
  383. if (params.cpuparams.n_threads <= 0) {
  384. params.cpuparams.n_threads = std::thread::hardware_concurrency();
  385. }
  386. }
  387. ).set_env("LLAMA_ARG_THREADS"));
  388. add_opt(llama_arg(
  389. {"-tb", "--threads-batch"}, "N",
  390. "number of threads to use during batch and prompt processing (default: same as --threads)",
  391. [](gpt_params & params, int value) {
  392. params.cpuparams_batch.n_threads = value;
  393. if (params.cpuparams_batch.n_threads <= 0) {
  394. params.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
  395. }
  396. }
  397. ));
  398. add_opt(llama_arg(
  399. {"-td", "--threads-draft"}, "N",
  400. "number of threads to use during generation (default: same as --threads)",
  401. [](gpt_params & params, int value) {
  402. params.draft_cpuparams.n_threads = value;
  403. if (params.draft_cpuparams.n_threads <= 0) {
  404. params.draft_cpuparams.n_threads = std::thread::hardware_concurrency();
  405. }
  406. }
  407. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  408. add_opt(llama_arg(
  409. {"-tbd", "--threads-batch-draft"}, "N",
  410. "number of threads to use during batch and prompt processing (default: same as --threads-draft)",
  411. [](gpt_params & params, int value) {
  412. params.draft_cpuparams_batch.n_threads = value;
  413. if (params.draft_cpuparams_batch.n_threads <= 0) {
  414. params.draft_cpuparams_batch.n_threads = std::thread::hardware_concurrency();
  415. }
  416. }
  417. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  418. add_opt(llama_arg(
  419. {"-C", "--cpu-mask"}, "M",
  420. "CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: \"\")",
  421. [](gpt_params & params, const std::string & mask) {
  422. params.cpuparams.mask_valid = true;
  423. if (!parse_cpu_mask(mask, params.cpuparams.cpumask)) {
  424. throw std::invalid_argument("invalid cpumask");
  425. }
  426. }
  427. ));
  428. add_opt(llama_arg(
  429. {"-Cr", "--cpu-range"}, "lo-hi",
  430. "range of CPUs for affinity. Complements --cpu-mask",
  431. [](gpt_params & params, const std::string & range) {
  432. params.cpuparams.mask_valid = true;
  433. if (!parse_cpu_range(range, params.cpuparams.cpumask)) {
  434. throw std::invalid_argument("invalid range");
  435. }
  436. }
  437. ));
  438. add_opt(llama_arg(
  439. {"--cpu-strict"}, "<0|1>",
  440. format("use strict CPU placement (default: %u)\n", (unsigned) params.cpuparams.strict_cpu),
  441. [](gpt_params & params, const std::string & value) {
  442. params.cpuparams.strict_cpu = std::stoul(value);
  443. }
  444. ));
  445. add_opt(llama_arg(
  446. {"--prio"}, "N",
  447. format("set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams.priority),
  448. [](gpt_params & params, int prio) {
  449. if (prio < 0 || prio > 3) {
  450. throw std::invalid_argument("invalid value");
  451. }
  452. params.cpuparams.priority = (enum ggml_sched_priority) prio;
  453. }
  454. ));
  455. add_opt(llama_arg(
  456. {"--poll"}, "<0...100>",
  457. format("use polling level to wait for work (0 - no polling, default: %u)\n", (unsigned) params.cpuparams.poll),
  458. [](gpt_params & params, const std::string & value) {
  459. params.cpuparams.poll = std::stoul(value);
  460. }
  461. ));
  462. add_opt(llama_arg(
  463. {"-Cb", "--cpu-mask-batch"}, "M",
  464. "CPU affinity mask: arbitrarily long hex. Complements cpu-range-batch (default: same as --cpu-mask)",
  465. [](gpt_params & params, const std::string & mask) {
  466. params.cpuparams_batch.mask_valid = true;
  467. if (!parse_cpu_mask(mask, params.cpuparams_batch.cpumask)) {
  468. throw std::invalid_argument("invalid cpumask");
  469. }
  470. }
  471. ));
  472. add_opt(llama_arg(
  473. {"-Crb", "--cpu-range-batch"}, "lo-hi",
  474. "ranges of CPUs for affinity. Complements --cpu-mask-batch",
  475. [](gpt_params & params, const std::string & range) {
  476. params.cpuparams_batch.mask_valid = true;
  477. if (!parse_cpu_range(range, params.cpuparams_batch.cpumask)) {
  478. throw std::invalid_argument("invalid range");
  479. }
  480. }
  481. ));
  482. add_opt(llama_arg(
  483. {"--cpu-strict-batch"}, "<0|1>",
  484. "use strict CPU placement (default: same as --cpu-strict)",
  485. [](gpt_params & params, int value) {
  486. params.cpuparams_batch.strict_cpu = value;
  487. }
  488. ));
  489. add_opt(llama_arg(
  490. {"--prio-batch"}, "N",
  491. format("set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams_batch.priority),
  492. [](gpt_params & params, int prio) {
  493. if (prio < 0 || prio > 3) {
  494. throw std::invalid_argument("invalid value");
  495. }
  496. params.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
  497. }
  498. ));
  499. add_opt(llama_arg(
  500. {"--poll-batch"}, "<0|1>",
  501. "use polling to wait for work (default: same as --poll)",
  502. [](gpt_params & params, int value) {
  503. params.cpuparams_batch.poll = value;
  504. }
  505. ));
  506. add_opt(llama_arg(
  507. {"-Cd", "--cpu-mask-draft"}, "M",
  508. "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)",
  509. [](gpt_params & params, const std::string & mask) {
  510. params.draft_cpuparams.mask_valid = true;
  511. if (!parse_cpu_mask(mask, params.draft_cpuparams.cpumask)) {
  512. throw std::invalid_argument("invalid cpumask");
  513. }
  514. }
  515. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  516. add_opt(llama_arg(
  517. {"-Crd", "--cpu-range-draft"}, "lo-hi",
  518. "Ranges of CPUs for affinity. Complements --cpu-mask-draft",
  519. [](gpt_params & params, const std::string & range) {
  520. params.draft_cpuparams.mask_valid = true;
  521. if (!parse_cpu_range(range, params.draft_cpuparams.cpumask)) {
  522. throw std::invalid_argument("invalid range");
  523. }
  524. }
  525. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  526. add_opt(llama_arg(
  527. {"--cpu-strict-draft"}, "<0|1>",
  528. "Use strict CPU placement for draft model (default: same as --cpu-strict)",
  529. [](gpt_params & params, int value) {
  530. params.draft_cpuparams.strict_cpu = value;
  531. }
  532. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  533. add_opt(llama_arg(
  534. {"--prio-draft"}, "N",
  535. format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.draft_cpuparams.priority),
  536. [](gpt_params & params, int prio) {
  537. if (prio < 0 || prio > 3) {
  538. throw std::invalid_argument("invalid value");
  539. }
  540. params.draft_cpuparams.priority = (enum ggml_sched_priority) prio;
  541. }
  542. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  543. add_opt(llama_arg(
  544. {"--poll-draft"}, "<0|1>",
  545. "Use polling to wait for draft model work (default: same as --poll])",
  546. [](gpt_params & params, int value) {
  547. params.draft_cpuparams.poll = value;
  548. }
  549. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  550. add_opt(llama_arg(
  551. {"-Cbd", "--cpu-mask-batch-draft"}, "M",
  552. "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)",
  553. [](gpt_params & params, const std::string & mask) {
  554. params.draft_cpuparams_batch.mask_valid = true;
  555. if (!parse_cpu_mask(mask, params.draft_cpuparams_batch.cpumask)) {
  556. throw std::invalid_argument("invalid cpumask");
  557. }
  558. }
  559. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  560. add_opt(llama_arg(
  561. {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi",
  562. "Ranges of CPUs for affinity. Complements --cpu-mask-draft-batch)",
  563. [](gpt_params & params, const std::string & range) {
  564. params.draft_cpuparams_batch.mask_valid = true;
  565. if (!parse_cpu_range(range, params.draft_cpuparams_batch.cpumask)) {
  566. throw std::invalid_argument("invalid cpumask");
  567. }
  568. }
  569. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  570. add_opt(llama_arg(
  571. {"--cpu-strict-batch-draft"}, "<0|1>",
  572. "Use strict CPU placement for draft model (default: --cpu-strict-draft)",
  573. [](gpt_params & params, int value) {
  574. params.draft_cpuparams_batch.strict_cpu = value;
  575. }
  576. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  577. add_opt(llama_arg(
  578. {"--prio-batch-draft"}, "N",
  579. format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.draft_cpuparams_batch.priority),
  580. [](gpt_params & params, int prio) {
  581. if (prio < 0 || prio > 3) {
  582. throw std::invalid_argument("invalid value");
  583. }
  584. params.draft_cpuparams_batch.priority = (enum ggml_sched_priority) prio;
  585. }
  586. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  587. add_opt(llama_arg(
  588. {"--poll-batch-draft"}, "<0|1>",
  589. "Use polling to wait for draft model work (default: --poll-draft)",
  590. [](gpt_params & params, int value) {
  591. params.draft_cpuparams_batch.poll = value;
  592. }
  593. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  594. add_opt(llama_arg(
  595. {"--draft"}, "N",
  596. format("number of tokens to draft for speculative decoding (default: %d)", params.n_draft),
  597. [](gpt_params & params, int value) {
  598. params.n_draft = value;
  599. }
  600. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
  601. add_opt(llama_arg(
  602. {"-ps", "--p-split"}, "N",
  603. format("speculative decoding split probability (default: %.1f)", (double)params.p_split),
  604. [](gpt_params & params, const std::string & value) {
  605. params.p_split = std::stof(value);
  606. }
  607. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
  608. add_opt(llama_arg(
  609. {"-lcs", "--lookup-cache-static"}, "FNAME",
  610. "path to static lookup cache to use for lookup decoding (not updated by generation)",
  611. [](gpt_params & params, const std::string & value) {
  612. params.lookup_cache_static = value;
  613. }
  614. ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
  615. add_opt(llama_arg(
  616. {"-lcd", "--lookup-cache-dynamic"}, "FNAME",
  617. "path to dynamic lookup cache to use for lookup decoding (updated by generation)",
  618. [](gpt_params & params, const std::string & value) {
  619. params.lookup_cache_dynamic = value;
  620. }
  621. ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
  622. add_opt(llama_arg(
  623. {"-c", "--ctx-size"}, "N",
  624. format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx),
  625. [](gpt_params & params, int value) {
  626. params.n_ctx = value;
  627. }
  628. ).set_env("LLAMA_ARG_CTX_SIZE"));
  629. add_opt(llama_arg(
  630. {"-n", "--predict", "--n-predict"}, "N",
  631. format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),
  632. [](gpt_params & params, int value) {
  633. params.n_predict = value;
  634. }
  635. ).set_env("LLAMA_ARG_N_PREDICT"));
  636. add_opt(llama_arg(
  637. {"-b", "--batch-size"}, "N",
  638. format("logical maximum batch size (default: %d)", params.n_batch),
  639. [](gpt_params & params, int value) {
  640. params.n_batch = value;
  641. }
  642. ).set_env("LLAMA_ARG_BATCH"));
  643. add_opt(llama_arg(
  644. {"-ub", "--ubatch-size"}, "N",
  645. format("physical maximum batch size (default: %d)", params.n_ubatch),
  646. [](gpt_params & params, int value) {
  647. params.n_ubatch = value;
  648. }
  649. ).set_env("LLAMA_ARG_UBATCH"));
  650. add_opt(llama_arg(
  651. {"--keep"}, "N",
  652. format("number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep),
  653. [](gpt_params & params, int value) {
  654. params.n_keep = value;
  655. }
  656. ));
  657. add_opt(llama_arg(
  658. {"--chunks"}, "N",
  659. format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks),
  660. [](gpt_params & params, int value) {
  661. params.n_chunks = value;
  662. }
  663. ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
  664. add_opt(llama_arg(
  665. {"-fa", "--flash-attn"},
  666. format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
  667. [](gpt_params & params) {
  668. params.flash_attn = true;
  669. }
  670. ).set_env("LLAMA_ARG_FLASH_ATTN"));
  671. add_opt(llama_arg(
  672. {"-p", "--prompt"}, "PROMPT",
  673. ex == LLAMA_EXAMPLE_MAIN
  674. ? "prompt to start generation with\nif -cnv is set, this will be used as system prompt"
  675. : "prompt to start generation with",
  676. [](gpt_params & params, const std::string & value) {
  677. params.prompt = value;
  678. }
  679. ));
  680. add_opt(llama_arg(
  681. {"-f", "--file"}, "FNAME",
  682. "a file containing the prompt (default: none)",
  683. [](gpt_params & params, const std::string & value) {
  684. std::ifstream file(value);
  685. if (!file) {
  686. throw std::runtime_error(format("error: failed to open file '%s'\n", value.c_str()));
  687. }
  688. // store the external file name in params
  689. params.prompt_file = value;
  690. std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
  691. if (!params.prompt.empty() && params.prompt.back() == '\n') {
  692. params.prompt.pop_back();
  693. }
  694. }
  695. ));
  696. add_opt(llama_arg(
  697. {"--in-file"}, "FNAME",
  698. "an input file (repeat to specify multiple files)",
  699. [](gpt_params & params, const std::string & value) {
  700. std::ifstream file(value);
  701. if (!file) {
  702. throw std::runtime_error(format("error: failed to open file '%s'\n", value.c_str()));
  703. }
  704. params.in_files.push_back(value);
  705. }
  706. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  707. add_opt(llama_arg(
  708. {"-bf", "--binary-file"}, "FNAME",
  709. "binary file containing the prompt (default: none)",
  710. [](gpt_params & params, const std::string & value) {
  711. std::ifstream file(value, std::ios::binary);
  712. if (!file) {
  713. throw std::runtime_error(format("error: failed to open file '%s'\n", value.c_str()));
  714. }
  715. // store the external file name in params
  716. params.prompt_file = value;
  717. std::ostringstream ss;
  718. ss << file.rdbuf();
  719. params.prompt = ss.str();
  720. fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
  721. }
  722. ));
  723. add_opt(llama_arg(
  724. {"-e", "--escape"},
  725. format("process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
  726. [](gpt_params & params) {
  727. params.escape = true;
  728. }
  729. ));
  730. add_opt(llama_arg(
  731. {"--no-escape"},
  732. "do not process escape sequences",
  733. [](gpt_params & params) {
  734. params.escape = false;
  735. }
  736. ));
  737. add_opt(llama_arg(
  738. {"-ptc", "--print-token-count"}, "N",
  739. format("print token count every N tokens (default: %d)", params.n_print),
  740. [](gpt_params & params, int value) {
  741. params.n_print = value;
  742. }
  743. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  744. add_opt(llama_arg(
  745. {"--prompt-cache"}, "FNAME",
  746. "file to cache prompt state for faster startup (default: none)",
  747. [](gpt_params & params, const std::string & value) {
  748. params.path_prompt_cache = value;
  749. }
  750. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  751. add_opt(llama_arg(
  752. {"--prompt-cache-all"},
  753. "if specified, saves user input and generations to cache as well\n",
  754. [](gpt_params & params) {
  755. params.prompt_cache_all = true;
  756. }
  757. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  758. add_opt(llama_arg(
  759. {"--prompt-cache-ro"},
  760. "if specified, uses the prompt cache but does not update it",
  761. [](gpt_params & params) {
  762. params.prompt_cache_ro = true;
  763. }
  764. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  765. add_opt(llama_arg(
  766. {"-r", "--reverse-prompt"}, "PROMPT",
  767. "halt generation at PROMPT, return control in interactive mode\n",
  768. [](gpt_params & params, const std::string & value) {
  769. params.antiprompt.emplace_back(value);
  770. }
  771. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  772. add_opt(llama_arg(
  773. {"-sp", "--special"},
  774. format("special tokens output enabled (default: %s)", params.special ? "true" : "false"),
  775. [](gpt_params & params) {
  776. params.special = true;
  777. }
  778. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  779. add_opt(llama_arg(
  780. {"-cnv", "--conversation"},
  781. format(
  782. "run in conversation mode:\n"
  783. "- does not print special tokens and suffix/prefix\n"
  784. "- interactive mode is also enabled\n"
  785. "(default: %s)",
  786. params.conversation ? "true" : "false"
  787. ),
  788. [](gpt_params & params) {
  789. params.conversation = true;
  790. }
  791. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  792. add_opt(llama_arg(
  793. {"-i", "--interactive"},
  794. format("run in interactive mode (default: %s)", params.interactive ? "true" : "false"),
  795. [](gpt_params & params) {
  796. params.interactive = true;
  797. }
  798. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  799. add_opt(llama_arg(
  800. {"-if", "--interactive-first"},
  801. format("run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false"),
  802. [](gpt_params & params) {
  803. params.interactive_first = true;
  804. }
  805. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  806. add_opt(llama_arg(
  807. {"-mli", "--multiline-input"},
  808. "allows you to write or paste multiple lines without ending each in '\\'",
  809. [](gpt_params & params) {
  810. params.multiline_input = true;
  811. }
  812. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  813. add_opt(llama_arg(
  814. {"--in-prefix-bos"},
  815. "prefix BOS to user inputs, preceding the `--in-prefix` string",
  816. [](gpt_params & params) {
  817. params.input_prefix_bos = true;
  818. params.enable_chat_template = false;
  819. }
  820. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  821. add_opt(llama_arg(
  822. {"--in-prefix"}, "STRING",
  823. "string to prefix user inputs with (default: empty)",
  824. [](gpt_params & params, const std::string & value) {
  825. params.input_prefix = value;
  826. params.enable_chat_template = false;
  827. }
  828. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  829. add_opt(llama_arg(
  830. {"--in-suffix"}, "STRING",
  831. "string to suffix after user inputs with (default: empty)",
  832. [](gpt_params & params, const std::string & value) {
  833. params.input_suffix = value;
  834. params.enable_chat_template = false;
  835. }
  836. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  837. add_opt(llama_arg(
  838. {"--no-warmup"},
  839. "skip warming up the model with an empty run",
  840. [](gpt_params & params) {
  841. params.warmup = false;
  842. }
  843. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  844. add_opt(llama_arg(
  845. {"--spm-infill"},
  846. format(
  847. "use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: %s)",
  848. params.spm_infill ? "enabled" : "disabled"
  849. ),
  850. [](gpt_params & params) {
  851. params.spm_infill = true;
  852. }
  853. ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
  854. add_opt(llama_arg(
  855. {"--samplers"}, "SAMPLERS",
  856. format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
  857. [](gpt_params & params, const std::string & value) {
  858. const auto sampler_names = string_split(value, ';');
  859. params.sparams.samplers = gpt_sampler_types_from_names(sampler_names, true);
  860. }
  861. ).set_sparam());
  862. add_opt(llama_arg(
  863. {"-s", "--seed"}, "SEED",
  864. format("RNG seed (default: %u, use random seed for %u)", params.sparams.seed, LLAMA_DEFAULT_SEED),
  865. [](gpt_params & params, const std::string & value) {
  866. params.sparams.seed = std::stoul(value);
  867. }
  868. ).set_sparam());
  869. add_opt(llama_arg(
  870. {"--sampling-seq"}, "SEQUENCE",
  871. format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()),
  872. [](gpt_params & params, const std::string & value) {
  873. params.sparams.samplers = gpt_sampler_types_from_chars(value);
  874. }
  875. ).set_sparam());
  876. add_opt(llama_arg(
  877. {"--ignore-eos"},
  878. "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)",
  879. [](gpt_params & params) {
  880. params.sparams.ignore_eos = true;
  881. }
  882. ).set_sparam());
  883. add_opt(llama_arg(
  884. {"--penalize-nl"},
  885. format("penalize newline tokens (default: %s)", params.sparams.penalize_nl ? "true" : "false"),
  886. [](gpt_params & params) {
  887. params.sparams.penalize_nl = true;
  888. }
  889. ).set_sparam());
  890. add_opt(llama_arg(
  891. {"--temp"}, "N",
  892. format("temperature (default: %.1f)", (double)params.sparams.temp),
  893. [](gpt_params & params, const std::string & value) {
  894. params.sparams.temp = std::stof(value);
  895. params.sparams.temp = std::max(params.sparams.temp, 0.0f);
  896. }
  897. ).set_sparam());
  898. add_opt(llama_arg(
  899. {"--top-k"}, "N",
  900. format("top-k sampling (default: %d, 0 = disabled)", params.sparams.top_k),
  901. [](gpt_params & params, int value) {
  902. params.sparams.top_k = value;
  903. }
  904. ).set_sparam());
  905. add_opt(llama_arg(
  906. {"--top-p"}, "N",
  907. format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sparams.top_p),
  908. [](gpt_params & params, const std::string & value) {
  909. params.sparams.top_p = std::stof(value);
  910. }
  911. ).set_sparam());
  912. add_opt(llama_arg(
  913. {"--min-p"}, "N",
  914. format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sparams.min_p),
  915. [](gpt_params & params, const std::string & value) {
  916. params.sparams.min_p = std::stof(value);
  917. }
  918. ).set_sparam());
  919. add_opt(llama_arg(
  920. {"--tfs"}, "N",
  921. format("tail free sampling, parameter z (default: %.1f, 1.0 = disabled)", (double)params.sparams.tfs_z),
  922. [](gpt_params & params, const std::string & value) {
  923. params.sparams.tfs_z = std::stof(value);
  924. }
  925. ).set_sparam());
  926. add_opt(llama_arg(
  927. {"--typical"}, "N",
  928. format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sparams.typ_p),
  929. [](gpt_params & params, const std::string & value) {
  930. params.sparams.typ_p = std::stof(value);
  931. }
  932. ).set_sparam());
  933. add_opt(llama_arg(
  934. {"--repeat-last-n"}, "N",
  935. format("last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)", params.sparams.penalty_last_n),
  936. [](gpt_params & params, int value) {
  937. params.sparams.penalty_last_n = value;
  938. params.sparams.n_prev = std::max(params.sparams.n_prev, params.sparams.penalty_last_n);
  939. }
  940. ).set_sparam());
  941. add_opt(llama_arg(
  942. {"--repeat-penalty"}, "N",
  943. format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sparams.penalty_repeat),
  944. [](gpt_params & params, const std::string & value) {
  945. params.sparams.penalty_repeat = std::stof(value);
  946. }
  947. ).set_sparam());
  948. add_opt(llama_arg(
  949. {"--presence-penalty"}, "N",
  950. format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sparams.penalty_present),
  951. [](gpt_params & params, const std::string & value) {
  952. params.sparams.penalty_present = std::stof(value);
  953. }
  954. ).set_sparam());
  955. add_opt(llama_arg(
  956. {"--frequency-penalty"}, "N",
  957. format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sparams.penalty_freq),
  958. [](gpt_params & params, const std::string & value) {
  959. params.sparams.penalty_freq = std::stof(value);
  960. }
  961. ).set_sparam());
  962. add_opt(llama_arg(
  963. {"--dynatemp-range"}, "N",
  964. format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sparams.dynatemp_range),
  965. [](gpt_params & params, const std::string & value) {
  966. params.sparams.dynatemp_range = std::stof(value);
  967. }
  968. ).set_sparam());
  969. add_opt(llama_arg(
  970. {"--dynatemp-exp"}, "N",
  971. format("dynamic temperature exponent (default: %.1f)", (double)params.sparams.dynatemp_exponent),
  972. [](gpt_params & params, const std::string & value) {
  973. params.sparams.dynatemp_exponent = std::stof(value);
  974. }
  975. ).set_sparam());
  976. add_opt(llama_arg(
  977. {"--mirostat"}, "N",
  978. format("use Mirostat sampling.\nTop K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n"
  979. "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sparams.mirostat),
  980. [](gpt_params & params, int value) {
  981. params.sparams.mirostat = value;
  982. }
  983. ).set_sparam());
  984. add_opt(llama_arg(
  985. {"--mirostat-lr"}, "N",
  986. format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sparams.mirostat_eta),
  987. [](gpt_params & params, const std::string & value) {
  988. params.sparams.mirostat_eta = std::stof(value);
  989. }
  990. ).set_sparam());
  991. add_opt(llama_arg(
  992. {"--mirostat-ent"}, "N",
  993. format("Mirostat target entropy, parameter tau (default: %.1f)", (double)params.sparams.mirostat_tau),
  994. [](gpt_params & params, const std::string & value) {
  995. params.sparams.mirostat_tau = std::stof(value);
  996. }
  997. ).set_sparam());
  998. add_opt(llama_arg(
  999. {"-l", "--logit-bias"}, "TOKEN_ID(+/-)BIAS",
  1000. "modifies the likelihood of token appearing in the completion,\n"
  1001. "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
  1002. "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'",
  1003. [](gpt_params & params, const std::string & value) {
  1004. std::stringstream ss(value);
  1005. llama_token key;
  1006. char sign;
  1007. std::string value_str;
  1008. try {
  1009. if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
  1010. const float bias = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
  1011. params.sparams.logit_bias.push_back({key, bias});
  1012. } else {
  1013. throw std::invalid_argument("invalid input format");
  1014. }
  1015. } catch (const std::exception&) {
  1016. throw std::invalid_argument("invalid input format");
  1017. }
  1018. }
  1019. ).set_sparam());
  1020. add_opt(llama_arg(
  1021. {"--grammar"}, "GRAMMAR",
  1022. format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sparams.grammar.c_str()),
  1023. [](gpt_params & params, const std::string & value) {
  1024. params.sparams.grammar = value;
  1025. }
  1026. ).set_sparam());
  1027. add_opt(llama_arg(
  1028. {"--grammar-file"}, "FNAME",
  1029. "file to read grammar from",
  1030. [](gpt_params & params, const std::string & value) {
  1031. std::ifstream file(value);
  1032. if (!file) {
  1033. throw std::runtime_error(format("error: failed to open file '%s'\n", value.c_str()));
  1034. }
  1035. std::copy(
  1036. std::istreambuf_iterator<char>(file),
  1037. std::istreambuf_iterator<char>(),
  1038. std::back_inserter(params.sparams.grammar)
  1039. );
  1040. }
  1041. ).set_sparam());
  1042. add_opt(llama_arg(
  1043. {"-j", "--json-schema"}, "SCHEMA",
  1044. "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead",
  1045. [](gpt_params & params, const std::string & value) {
  1046. params.sparams.grammar = json_schema_to_grammar(json::parse(value));
  1047. }
  1048. ).set_sparam());
  1049. add_opt(llama_arg(
  1050. {"--pooling"}, "{none,mean,cls,last}",
  1051. "pooling type for embeddings, use model default if unspecified",
  1052. [](gpt_params & params, const std::string & value) {
  1053. /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
  1054. else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
  1055. else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
  1056. else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; }
  1057. else { throw std::invalid_argument("invalid value"); }
  1058. }
  1059. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
  1060. add_opt(llama_arg(
  1061. {"--attention"}, "{causal,non,causal}",
  1062. "attention type for embeddings, use model default if unspecified",
  1063. [](gpt_params & params, const std::string & value) {
  1064. /**/ if (value == "causal") { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
  1065. else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
  1066. else { throw std::invalid_argument("invalid value"); }
  1067. }
  1068. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
  1069. add_opt(llama_arg(
  1070. {"--rope-scaling"}, "{none,linear,yarn}",
  1071. "RoPE frequency scaling method, defaults to linear unless specified by the model",
  1072. [](gpt_params & params, const std::string & value) {
  1073. /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
  1074. else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
  1075. else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
  1076. else { throw std::invalid_argument("invalid value"); }
  1077. }
  1078. ));
  1079. add_opt(llama_arg(
  1080. {"--rope-scale"}, "N",
  1081. "RoPE context scaling factor, expands context by a factor of N",
  1082. [](gpt_params & params, const std::string & value) {
  1083. params.rope_freq_scale = 1.0f / std::stof(value);
  1084. }
  1085. ));
  1086. add_opt(llama_arg(
  1087. {"--rope-freq-base"}, "N",
  1088. "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)",
  1089. [](gpt_params & params, const std::string & value) {
  1090. params.rope_freq_base = std::stof(value);
  1091. }
  1092. ));
  1093. add_opt(llama_arg(
  1094. {"--rope-freq-scale"}, "N",
  1095. "RoPE frequency scaling factor, expands context by a factor of 1/N",
  1096. [](gpt_params & params, const std::string & value) {
  1097. params.rope_freq_scale = std::stof(value);
  1098. }
  1099. ));
  1100. add_opt(llama_arg(
  1101. {"--yarn-orig-ctx"}, "N",
  1102. format("YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx),
  1103. [](gpt_params & params, int value) {
  1104. params.yarn_orig_ctx = value;
  1105. }
  1106. ));
  1107. add_opt(llama_arg(
  1108. {"--yarn-ext-factor"}, "N",
  1109. format("YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor),
  1110. [](gpt_params & params, const std::string & value) {
  1111. params.yarn_ext_factor = std::stof(value);
  1112. }
  1113. ));
  1114. add_opt(llama_arg(
  1115. {"--yarn-attn-factor"}, "N",
  1116. format("YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor),
  1117. [](gpt_params & params, const std::string & value) {
  1118. params.yarn_attn_factor = std::stof(value);
  1119. }
  1120. ));
  1121. add_opt(llama_arg(
  1122. {"--yarn-beta-slow"}, "N",
  1123. format("YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow),
  1124. [](gpt_params & params, const std::string & value) {
  1125. params.yarn_beta_slow = std::stof(value);
  1126. }
  1127. ));
  1128. add_opt(llama_arg(
  1129. {"--yarn-beta-fast"}, "N",
  1130. format("YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast),
  1131. [](gpt_params & params, const std::string & value) {
  1132. params.yarn_beta_fast = std::stof(value);
  1133. }
  1134. ));
  1135. add_opt(llama_arg(
  1136. {"-gan", "--grp-attn-n"}, "N",
  1137. format("group-attention factor (default: %d)", params.grp_attn_n),
  1138. [](gpt_params & params, int value) {
  1139. params.grp_attn_n = value;
  1140. }
  1141. ));
  1142. add_opt(llama_arg(
  1143. {"-gaw", "--grp-attn-w"}, "N",
  1144. format("group-attention width (default: %.1f)", (double)params.grp_attn_w),
  1145. [](gpt_params & params, int value) {
  1146. params.grp_attn_w = value;
  1147. }
  1148. ));
  1149. add_opt(llama_arg(
  1150. {"-dkvc", "--dump-kv-cache"},
  1151. "verbose print of the KV cache",
  1152. [](gpt_params & params) {
  1153. params.dump_kv_cache = true;
  1154. }
  1155. ));
  1156. add_opt(llama_arg(
  1157. {"-nkvo", "--no-kv-offload"},
  1158. "disable KV offload",
  1159. [](gpt_params & params) {
  1160. params.no_kv_offload = true;
  1161. }
  1162. ));
  1163. add_opt(llama_arg(
  1164. {"-ctk", "--cache-type-k"}, "TYPE",
  1165. format("KV cache data type for K (default: %s)", params.cache_type_k.c_str()),
  1166. [](gpt_params & params, const std::string & value) {
  1167. // TODO: get the type right here
  1168. params.cache_type_k = value;
  1169. }
  1170. ));
  1171. add_opt(llama_arg(
  1172. {"-ctv", "--cache-type-v"}, "TYPE",
  1173. format("KV cache data type for V (default: %s)", params.cache_type_v.c_str()),
  1174. [](gpt_params & params, const std::string & value) {
  1175. // TODO: get the type right here
  1176. params.cache_type_v = value;
  1177. }
  1178. ));
    add_opt(llama_arg(
        {"--perplexity", "--all-logits"},
        format("return logits for all tokens in the batch (default: %s)", params.logits_all ? "true" : "false"),
        [](gpt_params & params) {
            params.logits_all = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--hellaswag"},
        "compute HellaSwag score over random tasks from datafile supplied with -f",
        [](gpt_params & params) {
            params.hellaswag = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--hellaswag-tasks"}, "N",
        format("number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks),
        [](gpt_params & params, int value) {
            params.hellaswag_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--winogrande"},
        "compute Winogrande score over random tasks from datafile supplied with -f",
        [](gpt_params & params) {
            params.winogrande = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--winogrande-tasks"}, "N",
        format("number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks),
        [](gpt_params & params, int value) {
            params.winogrande_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--multiple-choice"},
        "compute multiple choice score over random tasks from datafile supplied with -f",
        [](gpt_params & params) {
            params.multiple_choice = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--multiple-choice-tasks"}, "N",
        format("number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks),
        [](gpt_params & params, int value) {
            params.multiple_choice_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--kl-divergence"},
        "computes KL-divergence to logits provided via --kl-divergence-base",
        [](gpt_params & params) {
            params.kl_divergence = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--save-all-logits", "--kl-divergence-base"}, "FNAME",
        "set logits file",
        [](gpt_params & params, const std::string & value) {
            params.logits_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
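    // Editor's note (hedged workflow sketch, inferred from the two options above): first save
    // logits with --kl-divergence-base, then re-run against the same file with --kl-divergence.
    // Binary and file names are illustrative:
    //   llama-perplexity -m base.gguf  -f data.txt --kl-divergence-base logits.bin
    //   llama-perplexity -m quant.gguf -f data.txt --kl-divergence-base logits.bin --kl-divergence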
    add_opt(llama_arg(
        {"--ppl-stride"}, "N",
        format("stride for perplexity calculation (default: %d)", params.ppl_stride),
        [](gpt_params & params, int value) {
            params.ppl_stride = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"--ppl-output-type"}, "<0|1>",
        format("output type for perplexity calculation (default: %d)", params.ppl_output_type),
        [](gpt_params & params, int value) {
            params.ppl_output_type = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(llama_arg(
        {"-dt", "--defrag-thold"}, "N",
        format("KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold),
        [](gpt_params & params, const std::string & value) {
            params.defrag_thold = std::stof(value);
        }
    ).set_env("LLAMA_ARG_DEFRAG_THOLD"));
    add_opt(llama_arg(
        {"-np", "--parallel"}, "N",
        format("number of parallel sequences to decode (default: %d)", params.n_parallel),
        [](gpt_params & params, int value) {
            params.n_parallel = value;
        }
    ));
    add_opt(llama_arg(
        {"-ns", "--sequences"}, "N",
        format("number of sequences to decode (default: %d)", params.n_sequences),
        [](gpt_params & params, int value) {
            params.n_sequences = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PARALLEL}));
    add_opt(llama_arg(
        {"-cb", "--cont-batching"},
        format("enable continuous batching (a.k.a. dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled"),
        [](gpt_params & params) {
            params.cont_batching = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CONT_BATCHING"));
    add_opt(llama_arg(
        {"-nocb", "--no-cont-batching"},
        "disable continuous batching",
        [](gpt_params & params) {
            params.cont_batching = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
    add_opt(llama_arg(
        {"--mmproj"}, "FILE",
        "path to a multimodal projector file for LLaVA. see examples/llava/README.md",
        [](gpt_params & params, const std::string & value) {
            params.mmproj = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
    add_opt(llama_arg(
        {"--image"}, "FILE",
        "path to an image file; use with multimodal models. Specify multiple times for batching",
        [](gpt_params & params, const std::string & value) {
            params.image.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
#ifdef GGML_USE_RPC
    add_opt(llama_arg(
        {"--rpc"}, "SERVERS",
        "comma separated list of RPC servers",
        [](gpt_params & params, const std::string & value) {
            params.rpc_servers = value;
        }
    ));
#endif
    add_opt(llama_arg(
        {"--mlock"},
        "force system to keep model in RAM rather than swapping or compressing",
        [](gpt_params & params) {
            params.use_mlock = true;
        }
    ));
    add_opt(llama_arg(
        {"--no-mmap"},
        "do not memory-map model (slower load but may reduce pageouts if not using mlock)",
        [](gpt_params & params) {
            params.use_mmap = false;
        }
    ));
    add_opt(llama_arg(
        {"--numa"}, "TYPE",
        "attempt optimizations that help on some NUMA systems\n"
        "- distribute: spread execution evenly over all nodes\n"
        "- isolate: only spawn threads on CPUs on the node that execution started on\n"
        "- numactl: use the CPU map provided by numactl\n"
        "if run without this previously, it is recommended to drop the system page cache before using this\n"
        "see https://github.com/ggerganov/llama.cpp/issues/1437",
        [](gpt_params & params, const std::string & value) {
            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ));
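    // Editor's note (hedged): pick one of the strategies listed in the help text above, e.g.
    //   llama-cli -m model.gguf --numa distribute
    // (binary name is illustrative; model file is a placeholder)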
    add_opt(llama_arg(
        {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
        "number of layers to store in VRAM",
        [](gpt_params & params, int value) {
            params.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers option will be ignored\n");
                fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
            }
        }
    ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
    add_opt(llama_arg(
        {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N",
        "number of layers to store in VRAM for the draft model",
        [](gpt_params & params, int value) {
            params.n_gpu_layers_draft = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers-draft option will be ignored\n");
                fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(llama_arg(
        {"-sm", "--split-mode"}, "{none,layer,row}",
        "how to split the model across multiple GPUs, one of:\n"
        "- none: use one GPU only\n"
        "- layer (default): split layers and KV across GPUs\n"
        "- row: split rows across GPUs",
        [](gpt_params & params, const std::string & value) {
            std::string arg_next = value;
            if (arg_next == "none") {
                params.split_mode = LLAMA_SPLIT_MODE_NONE;
            } else if (arg_next == "layer") {
                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
            } else if (arg_next == "row") {
#ifdef GGML_USE_SYCL
                fprintf(stderr, "warning: the split mode 'row' is not supported yet by the SYCL backend. Exiting.\n");
                exit(1);
#endif // GGML_USE_SYCL
                params.split_mode = LLAMA_SPLIT_MODE_ROW;
            } else {
                throw std::invalid_argument("invalid value");
            }
#ifndef GGML_USE_CUDA_SYCL_VULKAN
            fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the split mode has no effect.\n");
#endif // GGML_USE_CUDA_SYCL_VULKAN
        }
    ));
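    // Editor's note (hedged): a single-GPU setup keeps everything on one device, selecting it
    // with -mg (defined further below). Values are illustrative:
    //   llama-cli -m model.gguf -ngl 99 -sm none -mg 1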
    add_opt(llama_arg(
        {"-ts", "--tensor-split"}, "N0,N1,N2,...",
        "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1",
        [](gpt_params & params, const std::string & value) {
            std::string arg_next = value;
            // split string by , and /
            const std::regex regex{ R"([,/]+)" };
            std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
            std::vector<std::string> split_arg{ it, {} };
            if (split_arg.size() >= llama_max_devices()) {
                throw std::invalid_argument(
                    format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)llama_max_devices())
                );
            }
            for (size_t i = 0; i < llama_max_devices(); ++i) {
                if (i < split_arg.size()) {
                    params.tensor_split[i] = std::stof(split_arg[i]);
                } else {
                    params.tensor_split[i] = 0.0f;
                }
            }
#ifndef GGML_USE_CUDA_SYCL_VULKAN
            fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting a tensor split has no effect.\n");
#endif // GGML_USE_CUDA_SYCL_VULKAN
        }
    ));
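    // Editor's note (hedged): the proportions are relative, so the "3,1" example from the help
    // text above offloads roughly 75%/25% of the work across the first two devices:
    //   llama-cli -m model.gguf -ngl 99 -ts 3,1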
    add_opt(llama_arg(
        {"-mg", "--main-gpu"}, "INDEX",
        format("the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu),
        [](gpt_params & params, int value) {
            params.main_gpu = value;
#ifndef GGML_USE_CUDA_SYCL_VULKAN
            fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the main GPU has no effect.\n");
#endif // GGML_USE_CUDA_SYCL_VULKAN
        }
    ));
    add_opt(llama_arg(
        {"--check-tensors"},
        format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"),
        [](gpt_params & params) {
            params.check_tensors = true;
        }
    ));
    add_opt(llama_arg(
        {"--override-kv"}, "KEY=TYPE:VALUE",
        "advanced option to override model metadata by key. may be specified multiple times.\n"
        "types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false",
        [](gpt_params & params, const std::string & value) {
            if (!string_parse_kv_override(value.c_str(), params.kv_overrides)) {
                throw std::runtime_error(format("error: Invalid type for KV override: %s\n", value.c_str()));
            }
        }
    ));
    add_opt(llama_arg(
        {"--lora"}, "FNAME",
        "path to LoRA adapter (can be repeated to use multiple adapters)",
        [](gpt_params & params, const std::string & value) {
            params.lora_adapters.push_back({ std::string(value), 1.0 });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(llama_arg(
        {"--lora-scaled"}, "FNAME", "SCALE",
        "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
        [](gpt_params & params, const std::string & fname, const std::string & scale) {
            params.lora_adapters.push_back({ fname, std::stof(scale) });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
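    // Editor's note (hedged; adapter file names are placeholders): adapters can be stacked,
    // optionally with a per-adapter scale via --lora-scaled:
    //   llama-cli -m model.gguf --lora adapter-a.gguf --lora-scaled adapter-b.gguf 0.5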
    add_opt(llama_arg(
        {"--control-vector"}, "FNAME",
        "add a control vector\nnote: this argument can be repeated to add multiple control vectors",
        [](gpt_params & params, const std::string & value) {
            params.control_vectors.push_back({ 1.0f, value, });
        }
    ));
    add_opt(llama_arg(
        {"--control-vector-scaled"}, "FNAME", "SCALE",
        "add a control vector with user defined scaling SCALE\n"
        "note: this argument can be repeated to add multiple scaled control vectors",
        [](gpt_params & params, const std::string & fname, const std::string & scale) {
            params.control_vectors.push_back({ std::stof(scale), fname });
        }
    ));
    add_opt(llama_arg(
        {"--control-vector-layer-range"}, "START", "END",
        "layer range to apply the control vector(s) to, start and end inclusive",
        [](gpt_params & params, const std::string & start, const std::string & end) {
            params.control_vector_layer_start = std::stoi(start);
            params.control_vector_layer_end = std::stoi(end);
        }
    ));
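    // Editor's note (hedged; file name and layer range are illustrative): a typical combination
    // applies one or more control vectors to a contiguous layer range:
    //   llama-cli -m model.gguf --control-vector happy.gguf --control-vector-layer-range 10 20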
    add_opt(llama_arg(
        {"-a", "--alias"}, "STRING",
        "set alias for model name (to be used by REST API)",
        [](gpt_params & params, const std::string & value) {
            params.model_alias = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"-m", "--model"}, "FNAME",
        ex == LLAMA_EXAMPLE_EXPORT_LORA
            ? std::string("model path from which to load base model")
            : format(
                "model path (default: `models/$filename` with filename from `--hf-file` "
                "or `--model-url` if set, otherwise %s)", DEFAULT_MODEL_PATH
            ),
        [](gpt_params & params, const std::string & value) {
            params.model = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL"));
    add_opt(llama_arg(
        {"-md", "--model-draft"}, "FNAME",
        "draft model for speculative decoding (default: unused)",
        [](gpt_params & params, const std::string & value) {
            params.model_draft = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(llama_arg(
        {"-mu", "--model-url"}, "MODEL_URL",
        "model download url (default: unused)",
        [](gpt_params & params, const std::string & value) {
            params.model_url = value;
        }
    ).set_env("LLAMA_ARG_MODEL_URL"));
    add_opt(llama_arg(
        {"-hfr", "--hf-repo"}, "REPO",
        "Hugging Face model repository (default: unused)",
        [](gpt_params & params, const std::string & value) {
            params.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO"));
    add_opt(llama_arg(
        {"-hff", "--hf-file"}, "FILE",
        "Hugging Face model file (default: unused)",
        [](gpt_params & params, const std::string & value) {
            params.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE"));
    add_opt(llama_arg(
        {"-hft", "--hf-token"}, "TOKEN",
        "Hugging Face access token (default: value from HF_TOKEN environment variable)",
        [](gpt_params & params, const std::string & value) {
            params.hf_token = value;
        }
    ).set_env("HF_TOKEN"));
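    // Editor's note (hedged; repository and file names are placeholders): when -hfr/-hff are
    // given, the model file is fetched from Hugging Face and the default local path is derived
    // from --hf-file (see the -m help text above):
    //   llama-cli -hfr <user>/<repo> -hff <model>.gguf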
    add_opt(llama_arg(
        {"--context-file"}, "FNAME",
        "file to load context from (repeat to specify multiple files)",
        [](gpt_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.context_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(llama_arg(
        {"--chunk-size"}, "N",
        format("minimum length of embedded text chunks (default: %d)", params.chunk_size),
        [](gpt_params & params, int value) {
            params.chunk_size = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(llama_arg(
        {"--chunk-separator"}, "STRING",
        format("separator between chunks (default: '%s')", params.chunk_separator.c_str()),
        [](gpt_params & params, const std::string & value) {
            params.chunk_separator = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(llama_arg(
        {"--junk"}, "N",
        format("number of times to repeat the junk text (default: %d)", params.n_junk),
        [](gpt_params & params, int value) {
            params.n_junk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
    add_opt(llama_arg(
        {"--pos"}, "N",
        format("position of the passkey in the junk text (default: %d)", params.i_pos),
        [](gpt_params & params, int value) {
            params.i_pos = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
    add_opt(llama_arg(
        {"-o", "--output", "--output-file"}, "FNAME",
        format("output file (default: '%s')",
            ex == LLAMA_EXAMPLE_EXPORT_LORA
                ? params.lora_outfile.c_str()
                : ex == LLAMA_EXAMPLE_CVECTOR_GENERATOR
                    ? params.cvector_outfile.c_str()
                    : params.out_file.c_str()),
        [](gpt_params & params, const std::string & value) {
            params.out_file = value;
            params.cvector_outfile = value;
            params.lora_outfile = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(llama_arg(
        {"-ofreq", "--output-frequency"}, "N",
        format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
        [](gpt_params & params, int value) {
            params.n_out_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(llama_arg(
        {"--save-frequency"}, "N",
        format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq),
        [](gpt_params & params, int value) {
            params.n_save_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(llama_arg(
        {"--process-output"},
        format("collect data for the output tensor (default: %s)", params.process_output ? "true" : "false"),
        [](gpt_params & params) {
            params.process_output = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(llama_arg(
        {"--no-ppl"},
        format("do not compute perplexity (default: %s)", params.compute_ppl ? "true" : "false"),
        [](gpt_params & params) {
            params.compute_ppl = false;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(llama_arg(
        {"--chunk", "--from-chunk"}, "N",
        format("start processing the input from chunk N (default: %d)", params.i_chunk),
        [](gpt_params & params, int value) {
            params.i_chunk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(llama_arg(
        {"-pps"},
        format("whether the prompt is shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false"),
        [](gpt_params & params) {
            params.is_pp_shared = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(llama_arg(
        {"-npp"}, "n0,n1,...",
        "number of prompt tokens",
        [](gpt_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(llama_arg(
        {"-ntg"}, "n0,n1,...",
        "number of text generation tokens",
        [](gpt_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(llama_arg(
        {"-npl"}, "n0,n1,...",
        "number of parallel prompts",
        [](gpt_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
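    // Editor's note (hedged; binary name assumed to be llama-batched-bench): the three options
    // above take comma-separated lists, sweeping a grid of prompt lengths, generation lengths
    // and parallel streams:
    //   llama-batched-bench -m model.gguf -npp 128,256 -ntg 128 -npl 1,2,4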
    add_opt(llama_arg(
        {"--embd-normalize"}, "N",
        format("normalisation for embeddings (default: %d) (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)", params.embd_normalize),
        [](gpt_params & params, int value) {
            params.embd_normalize = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(llama_arg(
        {"--embd-output-format"}, "FORMAT",
        "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix",
        [](gpt_params & params, const std::string & value) {
            params.embd_out = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(llama_arg(
        {"--embd-separator"}, "STRING",
        "separator of embeddings (default \\n) for example \"<#sep#>\"",
        [](gpt_params & params, const std::string & value) {
            params.embd_sep = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
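    // Editor's note (hedged; binary name assumed): combine the three embedding options above to
    // emit OpenAI-style JSON with euclidean-normalised vectors, one per separator-delimited input:
    //   llama-embedding -m model.gguf -p "hello<#sep#>world" --embd-separator "<#sep#>" \
    //       --embd-normalize 2 --embd-output-format json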
    add_opt(llama_arg(
        {"--host"}, "HOST",
        format("IP address to listen on (default: %s)", params.hostname.c_str()),
        [](gpt_params & params, const std::string & value) {
            params.hostname = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_HOST"));
    add_opt(llama_arg(
        {"--port"}, "PORT",
        format("port to listen on (default: %d)", params.port),
        [](gpt_params & params, int value) {
            params.port = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_PORT"));
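    // Editor's note (hedged): the CLI flags and the environment variables registered via
    // set_env above are equivalent ways to set the server bind address:
    //   llama-server -m model.gguf --host 0.0.0.0 --port 8080
    //   LLAMA_ARG_HOST=0.0.0.0 LLAMA_ARG_PORT=8080 llama-server -m model.gguf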
    add_opt(llama_arg(
        {"--path"}, "PATH",
        format("path to serve static files from (default: %s)", params.public_path.c_str()),
        [](gpt_params & params, const std::string & value) {
            params.public_path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--embedding", "--embeddings"},
        format("restrict to only support embedding use case; use only with dedicated embedding models (default: %s)", params.embedding ? "enabled" : "disabled"),
        [](gpt_params & params) {
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS"));
    add_opt(llama_arg(
        {"--api-key"}, "KEY",
        "API key to use for authentication (default: none)",
        [](gpt_params & params, const std::string & value) {
            params.api_keys.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_API_KEY"));
    add_opt(llama_arg(
        {"--api-key-file"}, "FNAME",
        "path to file containing API keys (default: none)",
        [](gpt_params & params, const std::string & value) {
            std::ifstream key_file(value);
            if (!key_file) {
                throw std::runtime_error(format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (!key.empty()) {
                    params.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--ssl-key-file"}, "FNAME",
        "path to a PEM-encoded SSL private key file",
        [](gpt_params & params, const std::string & value) {
            params.ssl_file_key = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--ssl-cert-file"}, "FNAME",
        "path to a PEM-encoded SSL certificate file",
        [](gpt_params & params, const std::string & value) {
            params.ssl_file_cert = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"-to", "--timeout"}, "N",
        format("server read/write timeout in seconds (default: %d)", params.timeout_read),
        [](gpt_params & params, int value) {
            params.timeout_read  = value;
            params.timeout_write = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--threads-http"}, "N",
        format("number of threads used to process HTTP requests (default: %d)", params.n_threads_http),
        [](gpt_params & params, int value) {
            params.n_threads_http = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
    add_opt(llama_arg(
        {"-spf", "--system-prompt-file"}, "FNAME",
        "set a file to load the system prompt from (initial prompt of all slots); useful for chat applications",
        [](gpt_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string system_prompt;
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(system_prompt)
            );
            params.system_prompt = system_prompt;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--log-format"}, "{text, json}",
        "log output format: json or text (default: json)",
        [](gpt_params & params, const std::string & value) {
            if (value == "json") {
                params.log_json = true;
            } else if (value == "text") {
                params.log_json = false;
            } else {
                throw std::invalid_argument("invalid value");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--metrics"},
        format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),
        [](gpt_params & params) {
            params.endpoint_metrics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
    add_opt(llama_arg(
        {"--no-slots"},
        format("disables slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
        [](gpt_params & params) {
            params.endpoint_slots = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_ENDPOINT_SLOTS"));
    add_opt(llama_arg(
        {"--slot-save-path"}, "PATH",
        "path to save slot kv cache (default: disabled)",
        [](gpt_params & params, const std::string & value) {
            params.slot_save_path = value;
            // if the path doesn't end with DIRECTORY_SEPARATOR, add it
            if (!params.slot_save_path.empty() && params.slot_save_path[params.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
                params.slot_save_path += DIRECTORY_SEPARATOR;
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--chat-template"}, "JINJA_TEMPLATE",
        "set custom jinja chat template (default: template taken from model's metadata)\n"
        "if suffix/prefix are specified, template will be disabled\n"
        "only commonly used templates are accepted:\nhttps://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template",
        [](gpt_params & params, const std::string & value) {
            if (!llama_chat_verify_template(value)) {
                throw std::runtime_error(format(
                    "error: the supplied chat template is not supported: %s\n"
                    "note: llama.cpp does not use jinja parser, we only support commonly used templates\n",
                    value.c_str()
                ));
            }
            params.chat_template = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
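    // Editor's note (hedged): "chatml" is believed to be one of the commonly used built-in
    // templates accepted by llama_chat_apply_template (see the wiki link in the help text above):
    //   llama-server -m model.gguf --chat-template chatml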
    add_opt(llama_arg(
        {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
        format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),
        [](gpt_params & params, const std::string & value) {
            params.slot_prompt_similarity = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--lora-init-without-apply"},
        format("load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? "enabled" : "disabled"),
        [](gpt_params & params) {
            params.lora_init_without_apply = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(llama_arg(
        {"--simple-io"},
        "use basic IO for better compatibility in subprocesses and limited consoles",
        [](gpt_params & params) {
            params.simple_io = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
    add_opt(llama_arg(
        {"-ld", "--logdir"}, "LOGDIR",
        "path under which to save YAML logs (no logging if unset)",
        [](gpt_params & params, const std::string & value) {
            params.logdir = value;
            if (!params.logdir.empty() && params.logdir.back() != DIRECTORY_SEPARATOR) {
                params.logdir += DIRECTORY_SEPARATOR;
            }
        }
    ));
    add_opt(llama_arg(
        {"--positive-file"}, "FNAME",
        format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
        [](gpt_params & params, const std::string & value) {
            params.cvector_positive_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(llama_arg(
        {"--negative-file"}, "FNAME",
        format("negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str()),
        [](gpt_params & params, const std::string & value) {
            params.cvector_negative_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(llama_arg(
        {"--pca-batch"}, "N",
        format("batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch),
        [](gpt_params & params, int value) {
            params.n_pca_batch = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(llama_arg(
        {"--pca-iter"}, "N",
        format("number of iterations used for PCA (default: %d)", params.n_pca_iterations),
        [](gpt_params & params, int value) {
            params.n_pca_iterations = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(llama_arg(
        {"--method"}, "{pca, mean}",
        "dimensionality reduction method to be used (default: pca)",
        [](gpt_params & params, const std::string & value) {
            /**/ if (value == "pca")  { params.cvector_dimre_method = DIMRE_METHOD_PCA;  }
            else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(llama_arg(
        {"--output-format"}, "{md,jsonl}",
        "output format for batched-bench results (default: md)",
        [](gpt_params & params, const std::string & value) {
            /**/ if (value == "jsonl") { params.batched_bench_output_jsonl = true;  }
            else if (value == "md")    { params.batched_bench_output_jsonl = false; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
#ifndef LOG_DISABLE_LOGS
    // TODO: make this look less weird
    add_opt(llama_arg(
        {"--log-test"},
        "Log test",
        [](gpt_params &) { log_param_single_parse("--log-test"); }
    ));
    add_opt(llama_arg(
        {"--log-disable"},
        "Log disable",
        [](gpt_params &) { log_param_single_parse("--log-disable"); }
    ));
    add_opt(llama_arg(
        {"--log-enable"},
        "Log enable",
        [](gpt_params &) { log_param_single_parse("--log-enable"); }
    ));
    add_opt(llama_arg(
        {"--log-new"},
        "Log new",
        [](gpt_params &) { log_param_single_parse("--log-new"); }
    ));
    add_opt(llama_arg(
        {"--log-append"},
        "Log append",
        [](gpt_params &) { log_param_single_parse("--log-append"); }
    ));
    add_opt(llama_arg(
        {"--log-file"}, "FNAME",
        "Log file",
        [](gpt_params &, const std::string & value) { log_param_pair_parse(false, "--log-file", value); }
    ));
#endif // LOG_DISABLE_LOGS

    return ctx_arg;
}