
#include "arg.h"

#include "log.h"
#include "sampling.h"

#include <algorithm>
#include <climits>
#include <cstdarg>
#include <fstream>
#include <regex>
#include <set>
#include <string>
#include <thread>
#include <vector>

#include "json-schema-to-grammar.h"

using json = nlohmann::ordered_json;

common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) {
    this->examples = std::move(examples);
    return *this;
}

common_arg & common_arg::set_excludes(std::initializer_list<enum llama_example> excludes) {
    this->excludes = std::move(excludes);
    return *this;
}

common_arg & common_arg::set_env(const char * env) {
    help = help + "\n(env: " + env + ")";
    this->env = env;
    return *this;
}

common_arg & common_arg::set_sparam() {
    is_sparam = true;
    return *this;
}

bool common_arg::in_example(enum llama_example ex) {
    return examples.find(ex) != examples.end();
}

bool common_arg::is_exclude(enum llama_example ex) {
    return excludes.find(ex) != excludes.end();
}

bool common_arg::get_value_from_env(std::string & output) {
    if (env == nullptr) return false;
    char * value = std::getenv(env);
    if (value) {
        output = value;
        return true;
    }
    return false;
}

bool common_arg::has_value_from_env() {
    return env != nullptr && std::getenv(env);
}

static std::vector<std::string> break_str_into_lines(std::string input, size_t max_char_per_line) {
    std::vector<std::string> result;
    std::istringstream iss(input);
    std::string line;
    auto add_line = [&](const std::string & l) {
        if (l.length() <= max_char_per_line) {
            result.push_back(l);
        } else {
            std::istringstream line_stream(l);
            std::string word, current_line;
            while (line_stream >> word) {
                if (current_line.length() + !current_line.empty() + word.length() > max_char_per_line) {
                    if (!current_line.empty()) result.push_back(current_line);
                    current_line = word;
                } else {
                    current_line += (!current_line.empty() ? " " : "") + word;
                }
            }
            if (!current_line.empty()) result.push_back(current_line);
        }
    };
    while (std::getline(iss, line)) {
        add_line(line);
    }
    return result;
}
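
// Worked example (illustrative, not part of the upstream file): with
// max_char_per_line = 10, break_str_into_lines("lorem ipsum dolor sit amet", 10)
// yields {"lorem", "ipsum", "dolor sit", "amet"} -- wrapping happens on word
// boundaries, and the `+ !current_line.empty()` term above accounts for the
// joining space when a word is appended to a non-empty line.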
std::string common_arg::to_string() {
    // params for printing to console
    const static int n_leading_spaces = 40;
    const static int n_char_per_line_help = 70; // TODO: detect this based on current console
    std::string leading_spaces(n_leading_spaces, ' ');

    std::ostringstream ss;
    for (const auto arg : args) {
        if (arg == args.front()) {
            if (args.size() == 1) {
                ss << arg;
            } else {
                // first arg is usually the abbreviation, pad it so the longer aliases line up
                auto tmp = std::string(arg) + ", ";
                auto spaces = std::string(std::max(0, 7 - (int)tmp.size()), ' ');
                ss << tmp << spaces;
            }
        } else {
            ss << arg << (arg != args.back() ? ", " : "");
        }
    }
    if (value_hint)   ss << " " << value_hint;
    if (value_hint_2) ss << " " << value_hint_2;
    if (ss.tellp() > n_leading_spaces - 3) {
        // current line is too long, add new line
        ss << "\n" << leading_spaces;
    } else {
        // padding between arg and help, same line
        ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
    }
    const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
    for (const auto & line : help_lines) {
        ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
    }
    return ss.str();
}
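
// For reference (derived from the constants above): to_string() renders each
// option as a 40-column argument gutter followed by help text wrapped at 70
// characters, e.g. roughly:
//     -t,    --threads N                  number of threads to use during
//                                         generation (default: ...)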
//
// utils
//

static void common_params_handle_model_default(
        std::string & model,
        const std::string & model_url,
        std::string & hf_repo,
        std::string & hf_file,
        const std::string & hf_token,
        const std::string & model_default) {
    if (!hf_repo.empty()) {
        // short-hand to avoid specifying --hf-file -> default it to --model
        if (hf_file.empty()) {
            if (model.empty()) {
                auto auto_detected = common_get_hf_file(hf_repo, hf_token);
                if (auto_detected.first.empty() || auto_detected.second.empty()) {
                    exit(1); // built without CURL, error message already printed
                }
                hf_repo = auto_detected.first;
                hf_file = auto_detected.second;
            } else {
                hf_file = model;
            }
        }
        // make sure model path is present (for caching purposes)
        if (model.empty()) {
            // this is to avoid different repo having same file name, or same file name in different subdirs
            std::string filename = hf_repo + "_" + hf_file;
            // to make sure we don't have any slashes in the filename
            string_replace_all(filename, "/", "_");
            model = fs_get_cache_file(filename);
        }
    } else if (!model_url.empty()) {
        if (model.empty()) {
            auto f = string_split<std::string>(model_url, '#').front();
            f = string_split<std::string>(f, '?').front();
            model = fs_get_cache_file(string_split<std::string>(f, '/').back());
        }
    } else if (model.empty()) {
        model = model_default;
    }
}
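
// Resolution precedence (summarizing the branches above): --hf-repo wins over
// --model-url, which wins over the built-in default. In the URL case the cached
// file name is the last path segment with any '#fragment' and '?query' stripped,
// e.g. "https://host/x/model.gguf?dl=1" -> cache file "model.gguf".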
const std::vector<ggml_type> kv_cache_types = {
    GGML_TYPE_F32,
    GGML_TYPE_F16,
    GGML_TYPE_BF16,
    GGML_TYPE_Q8_0,
    GGML_TYPE_Q4_0,
    GGML_TYPE_Q4_1,
    GGML_TYPE_IQ4_NL,
    GGML_TYPE_Q5_0,
    GGML_TYPE_Q5_1,
};

static ggml_type kv_cache_type_from_str(const std::string & s) {
    for (const auto & type : kv_cache_types) {
        if (ggml_type_name(type) == s) {
            return type;
        }
    }
    throw std::runtime_error("Unsupported cache type: " + s);
}

static std::string get_all_kv_cache_types() {
    std::ostringstream msg;
    for (const auto & type : kv_cache_types) {
        msg << ggml_type_name(type) << (&type == &kv_cache_types.back() ? "" : ", ");
    }
    return msg.str();
}
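
// Note: kv_cache_type_from_str() matches against ggml_type_name(), so the
// accepted strings are exactly the names ggml reports (e.g. "f16", "q8_0"),
// and get_all_kv_cache_types() is what the -ctk/-ctv help text below interpolates.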
//
// CLI argument parsing functions
//

static bool common_params_parse_ex(int argc, char ** argv, common_params_context & ctx_arg) {
    std::string arg;
    const std::string arg_prefix = "--";
    common_params & params = ctx_arg.params;

    std::unordered_map<std::string, common_arg *> arg_to_options;
    for (auto & opt : ctx_arg.options) {
        for (const auto & arg : opt.args) {
            arg_to_options[arg] = &opt;
        }
    }

    // handle environment variables
    for (auto & opt : ctx_arg.options) {
        std::string value;
        if (opt.get_value_from_env(value)) {
            try {
                if (opt.handler_void && (value == "1" || value == "true")) {
                    opt.handler_void(params);
                }
                if (opt.handler_int) {
                    opt.handler_int(params, std::stoi(value));
                }
                if (opt.handler_string) {
                    opt.handler_string(params, value);
                    continue;
                }
            } catch (std::exception & e) {
                throw std::invalid_argument(string_format(
                    "error while handling environment variable \"%s\": %s\n\n", opt.env, e.what()));
            }
        }
    }

    // handle command line arguments
    auto check_arg = [&](int i) {
        if (i + 1 >= argc) {
            throw std::invalid_argument("expected value for argument");
        }
    };

    for (int i = 1; i < argc; i++) {
        const std::string arg_prefix = "--";

        std::string arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }
        if (arg_to_options.find(arg) == arg_to_options.end()) {
            throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
        }
        auto opt = *arg_to_options[arg];
        if (opt.has_value_from_env()) {
            fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
        }
        try {
            if (opt.handler_void) {
                opt.handler_void(params);
                continue;
            }

            // arg with single value
            check_arg(i);
            std::string val = argv[++i];
            if (opt.handler_int) {
                opt.handler_int(params, std::stoi(val));
                continue;
            }
            if (opt.handler_string) {
                opt.handler_string(params, val);
                continue;
            }

            // arg with 2 values
            check_arg(i);
            std::string val2 = argv[++i];
            if (opt.handler_str_str) {
                opt.handler_str_str(params, val, val2);
                continue;
            }
        } catch (std::exception & e) {
            throw std::invalid_argument(string_format(
                "error while handling argument \"%s\": %s\n\n"
                "usage:\n%s\n\nto show complete usage, run with -h",
                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
        }
    }

    postprocess_cpu_params(params.cpuparams, nullptr);
    postprocess_cpu_params(params.cpuparams_batch, &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams, &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams_batch, &params.cpuparams_batch);

    if (params.prompt_cache_all && (params.interactive || params.interactive_first)) {
        throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
    }

    // TODO: refactor model params in a common struct
    common_params_handle_model_default(params.model, params.model_url, params.hf_repo, params.hf_file, params.hf_token, DEFAULT_MODEL_PATH);
    common_params_handle_model_default(params.speculative.model, params.speculative.model_url, params.speculative.hf_repo, params.speculative.hf_file, params.hf_token, "");
    common_params_handle_model_default(params.vocoder.model, params.vocoder.model_url, params.vocoder.hf_repo, params.vocoder.hf_file, params.hf_token, "");

    if (params.escape) {
        string_process_escapes(params.prompt);
        string_process_escapes(params.input_prefix);
        string_process_escapes(params.input_suffix);
        for (auto & antiprompt : params.antiprompt) {
            string_process_escapes(antiprompt);
        }
        for (auto & seq_breaker : params.sampling.dry_sequence_breakers) {
            string_process_escapes(seq_breaker);
        }
    }

    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
    }

    if (params.reranking && params.embedding) {
        throw std::invalid_argument("error: either --embedding or --reranking can be specified, but not both");
    }

    return true;
}
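
// Dispatch order in the loop above: handler_void consumes no value; otherwise
// the next argv entry feeds handler_int or handler_string; handler_str_str
// consumes two values. Also note that for "--"-prefixed options, '_' is
// normalized to '-', so e.g. --ctx_size is accepted as an alias for --ctx-size.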
static void common_params_print_usage(common_params_context & ctx_arg) {
    auto print_options = [](std::vector<common_arg *> & options) {
        for (common_arg * opt : options) {
            printf("%s", opt->to_string().c_str());
        }
    };

    std::vector<common_arg *> common_options;
    std::vector<common_arg *> sparam_options;
    std::vector<common_arg *> specific_options;
    for (auto & opt : ctx_arg.options) {
        // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching current example
        if (opt.is_sparam) {
            sparam_options.push_back(&opt);
        } else if (opt.in_example(ctx_arg.ex)) {
            specific_options.push_back(&opt);
        } else {
            common_options.push_back(&opt);
        }
    }

    printf("----- common params -----\n\n");
    print_options(common_options);
    printf("\n\n----- sampling params -----\n\n");
    print_options(sparam_options);
    // TODO: maybe convert enum llama_example to string
    printf("\n\n----- example-specific params -----\n\n");
    print_options(specific_options);
}

static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
    std::vector<ggml_backend_dev_t> devices;
    auto dev_names = string_split<std::string>(value, ',');
    if (dev_names.empty()) {
        throw std::invalid_argument("no devices specified");
    }
    if (dev_names.size() == 1 && dev_names[0] == "none") {
        devices.push_back(nullptr);
    } else {
        for (const auto & device : dev_names) {
            auto * dev = ggml_backend_dev_by_name(device.c_str());
            if (!dev || ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_GPU) {
                throw std::invalid_argument(string_format("invalid device: %s", device.c_str()));
            }
            devices.push_back(dev);
        }
        devices.push_back(nullptr);
    }
    return devices;
}
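
// The returned list is nullptr-terminated (the sentinel the consuming code
// expects); passing "none" yields a list containing only the sentinel, i.e.
// no GPU devices at all.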
static void add_rpc_devices(std::string servers) {
    auto rpc_servers = string_split<std::string>(servers, ',');
    if (rpc_servers.empty()) {
        throw std::invalid_argument("no RPC servers specified");
    }
    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
    if (!rpc_reg) {
        throw std::invalid_argument("failed to find RPC backend");
    }
    typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
    ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
    if (!ggml_backend_rpc_add_device_fn) {
        throw std::invalid_argument("failed to find RPC device add function");
    }
    for (const auto & server : rpc_servers) {
        ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
        if (dev) {
            ggml_backend_device_register(dev);
        } else {
            throw std::invalid_argument("failed to register RPC device");
        }
    }
}
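
// The RPC backend is an optional, dynamically loaded module, so its device-add
// entry point is resolved by name via ggml_backend_reg_get_proc_address() rather
// than linked directly -- this keeps the hard dependency out of the common code.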
bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
    auto ctx_arg = common_params_parser_init(params, ex, print_usage);
    const common_params params_org = ctx_arg.params; // the example can modify the default params

    try {
        if (!common_params_parse_ex(argc, argv, ctx_arg)) {
            ctx_arg.params = params_org;
            return false;
        }
        if (ctx_arg.params.usage) {
            common_params_print_usage(ctx_arg);
            if (ctx_arg.print_usage) {
                ctx_arg.print_usage(argc, argv);
            }
            exit(0);
        }
    } catch (const std::invalid_argument & ex) {
        fprintf(stderr, "%s\n", ex.what());
        ctx_arg.params = params_org;
        return false;
    }

    return true;
}
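
// Typical call site (illustrative sketch, not part of this file):
//
//     common_params params;
//     if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
//         return 1; // defaults were restored, error already printed to stderr
//     }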
static std::string list_builtin_chat_templates() {
    std::vector<const char *> supported_tmpl;
    int32_t res = llama_chat_builtin_templates(nullptr, 0);
    supported_tmpl.resize(res);
    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
    std::ostringstream msg;
    for (auto & tmpl : supported_tmpl) {
        msg << tmpl << (&tmpl == &supported_tmpl.back() ? "" : ", ");
    }
    return msg.str();
}

common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
    // load dynamic backends
    ggml_backend_load_all();

    common_params_context ctx_arg(params);
    ctx_arg.print_usage = print_usage;
    ctx_arg.ex          = ex;

    std::string sampler_type_chars;
    std::string sampler_type_names;
    for (const auto & sampler : params.sampling.samplers) {
        sampler_type_chars += common_sampler_type_to_chr(sampler);
        sampler_type_names += common_sampler_type_to_str(sampler) + ";";
    }
    sampler_type_names.pop_back();

    /**
     * filter options by example
     * rules:
     * - all examples inherit options from LLAMA_EXAMPLE_COMMON
     * - if LLAMA_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example
     * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
     */
    auto add_opt = [&](common_arg arg) {
        if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
            ctx_arg.options.push_back(std::move(arg));
        }
    };
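
    // Every option below goes through add_opt(), so an option is registered only
    // if it targets the current example (or LLAMA_EXAMPLE_COMMON) and is not
    // excluded; e.g. a server-only flag never shows up in main's -h output.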
    add_opt(common_arg(
        {"-h", "--help", "--usage"},
        "print usage and exit",
        [](common_params & params) {
            params.usage = true;
        }
    ));
    add_opt(common_arg(
        {"--version"},
        "show version and build info",
        [](common_params &) {
            fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
            fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
            exit(0);
        }
    ));
    add_opt(common_arg(
        {"--verbose-prompt"},
        string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),
        [](common_params & params) {
            params.verbose_prompt = true;
        }
    ));
    add_opt(common_arg(
        {"--no-display-prompt"},
        string_format("don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false"),
        [](common_params & params) {
            params.display_prompt = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-co", "--color"},
        string_format("colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? "true" : "false"),
        [](common_params & params) {
            params.use_color = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-t", "--threads"}, "N",
        string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
        [](common_params & params, int value) {
            params.cpuparams.n_threads = value;
            if (params.cpuparams.n_threads <= 0) {
                params.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_env("LLAMA_ARG_THREADS"));
    add_opt(common_arg(
        {"-tb", "--threads-batch"}, "N",
        "number of threads to use during batch and prompt processing (default: same as --threads)",
        [](common_params & params, int value) {
            params.cpuparams_batch.n_threads = value;
            if (params.cpuparams_batch.n_threads <= 0) {
                params.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ));
    add_opt(common_arg(
        {"-C", "--cpu-mask"}, "M",
        "CPU affinity mask: arbitrarily long hex. Complements --cpu-range (default: \"\")",
        [](common_params & params, const std::string & mask) {
            params.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ));
    add_opt(common_arg(
        {"-Cr", "--cpu-range"}, "lo-hi",
        "range of CPUs for affinity. Complements --cpu-mask",
        [](common_params & params, const std::string & range) {
            params.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ));
    add_opt(common_arg(
        {"--cpu-strict"}, "<0|1>",
        string_format("use strict CPU placement (default: %u)\n", (unsigned) params.cpuparams.strict_cpu),
        [](common_params & params, const std::string & value) {
            params.cpuparams.strict_cpu = std::stoul(value);
        }
    ));
    add_opt(common_arg(
        {"--prio"}, "N",
        string_format("set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ));
    add_opt(common_arg(
        {"--poll"}, "<0...100>",
        string_format("use polling level to wait for work (0 - no polling, default: %u)\n", (unsigned) params.cpuparams.poll),
        [](common_params & params, const std::string & value) {
            params.cpuparams.poll = std::stoul(value);
        }
    ));
    add_opt(common_arg(
        {"-Cb", "--cpu-mask-batch"}, "M",
        "CPU affinity mask: arbitrarily long hex. Complements --cpu-range-batch (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ));
    add_opt(common_arg(
        {"-Crb", "--cpu-range-batch"}, "lo-hi",
        "ranges of CPUs for affinity. Complements --cpu-mask-batch",
        [](common_params & params, const std::string & range) {
            params.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ));
    add_opt(common_arg(
        {"--cpu-strict-batch"}, "<0|1>",
        "use strict CPU placement (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.cpuparams_batch.strict_cpu = value;
        }
    ));
    add_opt(common_arg(
        {"--prio-batch"}, "N",
        string_format("set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ));
    add_opt(common_arg(
        {"--poll-batch"}, "<0|1>",
        "use polling to wait for work (default: same as --poll)",
        [](common_params & params, int value) {
            params.cpuparams_batch.poll = value;
        }
    ));
    add_opt(common_arg(
        {"-lcs", "--lookup-cache-static"}, "FNAME",
        "path to static lookup cache to use for lookup decoding (not updated by generation)",
        [](common_params & params, const std::string & value) {
            params.lookup_cache_static = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-lcd", "--lookup-cache-dynamic"}, "FNAME",
        "path to dynamic lookup cache to use for lookup decoding (updated by generation)",
        [](common_params & params, const std::string & value) {
            params.lookup_cache_dynamic = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-c", "--ctx-size"}, "N",
        string_format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx),
        [](common_params & params, int value) {
            params.n_ctx = value;
        }
    ).set_env("LLAMA_ARG_CTX_SIZE"));
    add_opt(common_arg(
        {"-n", "--predict", "--n-predict"}, "N",
        string_format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),
        [](common_params & params, int value) {
            params.n_predict = value;
        }
    ).set_env("LLAMA_ARG_N_PREDICT"));
    add_opt(common_arg(
        {"-b", "--batch-size"}, "N",
        string_format("logical maximum batch size (default: %d)", params.n_batch),
        [](common_params & params, int value) {
            params.n_batch = value;
        }
    ).set_env("LLAMA_ARG_BATCH"));
    add_opt(common_arg(
        {"-ub", "--ubatch-size"}, "N",
        string_format("physical maximum batch size (default: %d)", params.n_ubatch),
        [](common_params & params, int value) {
            params.n_ubatch = value;
        }
    ).set_env("LLAMA_ARG_UBATCH"));
    add_opt(common_arg(
        {"--keep"}, "N",
        string_format("number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep),
        [](common_params & params, int value) {
            params.n_keep = value;
        }
    ));
    add_opt(common_arg(
        {"--no-context-shift"},
        string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
        [](common_params & params) {
            params.ctx_shift = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
    add_opt(common_arg(
        {"--chunks"}, "N",
        string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks),
        [](common_params & params, int value) {
            params.n_chunks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"-fa", "--flash-attn"},
        string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
        [](common_params & params) {
            params.flash_attn = true;
        }
    ).set_env("LLAMA_ARG_FLASH_ATTN"));
    add_opt(common_arg(
        {"-p", "--prompt"}, "PROMPT",
        ex == LLAMA_EXAMPLE_MAIN
            ? "prompt to start generation with\nif -cnv is set, this will be used as system prompt"
            : "prompt to start generation with",
        [](common_params & params, const std::string & value) {
            params.prompt = value;
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--no-perf"},
        string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
        [](common_params & params) {
            params.no_perf = true;
            params.sampling.no_perf = true;
        }
    ).set_env("LLAMA_ARG_NO_PERF"));
    add_opt(common_arg(
        {"-f", "--file"}, "FNAME",
        "a file containing the prompt (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            // store the external file name in params
            params.prompt_file = value;
            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
            if (!params.prompt.empty() && params.prompt.back() == '\n') {
                params.prompt.pop_back();
            }
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--in-file"}, "FNAME",
        "an input file (repeat to specify multiple files)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.in_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"-bf", "--binary-file"}, "FNAME",
        "binary file containing the prompt (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            // store the external file name in params
            params.prompt_file = value;
            std::ostringstream ss;
            ss << file.rdbuf();
            params.prompt = ss.str();
            fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-e", "--escape"},
        string_format("process escape sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
        [](common_params & params) {
            params.escape = true;
        }
    ));
    add_opt(common_arg(
        {"--no-escape"},
        "do not process escape sequences",
        [](common_params & params) {
            params.escape = false;
        }
    ));
    add_opt(common_arg(
        {"-ptc", "--print-token-count"}, "N",
        string_format("print token count every N tokens (default: %d)", params.n_print),
        [](common_params & params, int value) {
            params.n_print = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache"}, "FNAME",
        "file to cache prompt state for faster startup (default: none)",
        [](common_params & params, const std::string & value) {
            params.path_prompt_cache = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache-all"},
        "if specified, saves user input and generations to cache as well\n",
        [](common_params & params) {
            params.prompt_cache_all = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache-ro"},
        "if specified, uses the prompt cache but does not update it",
        [](common_params & params) {
            params.prompt_cache_ro = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-r", "--reverse-prompt"}, "PROMPT",
        "halt generation at PROMPT, return control in interactive mode\n",
        [](common_params & params, const std::string & value) {
            params.antiprompt.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-sp", "--special"},
        string_format("special tokens output enabled (default: %s)", params.special ? "true" : "false"),
        [](common_params & params) {
            params.special = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-cnv", "--conversation"},
        "run in conversation mode:\n"
        "- does not print special tokens and suffix/prefix\n"
        "- interactive mode is also enabled\n"
        "(default: auto enabled if chat template is available)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-no-cnv", "--no-conversation"},
        "force disable conversation mode (default: false)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-i", "--interactive"},
        string_format("run in interactive mode (default: %s)", params.interactive ? "true" : "false"),
        [](common_params & params) {
            params.interactive = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-if", "--interactive-first"},
        string_format("run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false"),
        [](common_params & params) {
            params.interactive_first = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-mli", "--multiline-input"},
        "allows you to write or paste multiple lines without ending each in '\\'",
        [](common_params & params) {
            params.multiline_input = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-prefix-bos"},
        "prefix BOS to user inputs, preceding the `--in-prefix` string",
        [](common_params & params) {
            params.input_prefix_bos = true;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-prefix"}, "STRING",
        "string to prefix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_prefix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--in-suffix"}, "STRING",
        "string to suffix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_suffix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--no-warmup"},
        "skip warming up the model with an empty run",
        [](common_params & params) {
            params.warmup = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--spm-infill"},
        string_format(
            "use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: %s)",
            params.spm_infill ? "enabled" : "disabled"
        ),
        [](common_params & params) {
            params.spm_infill = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--samplers"}, "SAMPLERS",
        string_format("samplers that will be used for generation, in the order given, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
        [](common_params & params, const std::string & value) {
            const auto sampler_names = string_split<std::string>(value, ';');
            params.sampling.samplers = common_sampler_types_from_names(sampler_names, true);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-s", "--seed"}, "SEED",
        string_format("RNG seed (default: %d, use random seed for %d)", params.sampling.seed, LLAMA_DEFAULT_SEED),
        [](common_params & params, const std::string & value) {
            params.sampling.seed = std::stoul(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--sampling-seq", "--sampler-seq"}, "SEQUENCE",
        string_format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()),
        [](common_params & params, const std::string & value) {
            params.sampling.samplers = common_sampler_types_from_chars(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--ignore-eos"},
        "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)",
        [](common_params & params) {
            params.sampling.ignore_eos = true;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--temp"}, "N",
        string_format("temperature (default: %.1f)", (double)params.sampling.temp),
        [](common_params & params, const std::string & value) {
            params.sampling.temp = std::stof(value);
            params.sampling.temp = std::max(params.sampling.temp, 0.0f);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-k"}, "N",
        string_format("top-k sampling (default: %d, 0 = disabled)", params.sampling.top_k),
        [](common_params & params, int value) {
            params.sampling.top_k = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-p"}, "N",
        string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sampling.top_p),
        [](common_params & params, const std::string & value) {
            params.sampling.top_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--min-p"}, "N",
        string_format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sampling.min_p),
        [](common_params & params, const std::string & value) {
            params.sampling.min_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--xtc-probability"}, "N",
        string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sampling.xtc_probability),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_probability = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--xtc-threshold"}, "N",
        string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sampling.xtc_threshold),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_threshold = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--typical"}, "N",
        string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sampling.typ_p),
        [](common_params & params, const std::string & value) {
            params.sampling.typ_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-last-n"}, "N",
        string_format("last n tokens to consider for penalization (default: %d, 0 = disabled, -1 = ctx_size)", params.sampling.penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid repeat-last-n = %d\n", value));
            }
            params.sampling.penalty_last_n = value;
            params.sampling.n_prev = std::max(params.sampling.n_prev, params.sampling.penalty_last_n);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-penalty"}, "N",
        string_format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sampling.penalty_repeat),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_repeat = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--presence-penalty"}, "N",
        string_format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_present),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_present = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--frequency-penalty"}, "N",
        string_format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_freq),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_freq = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-multiplier"}, "N",
        string_format("set DRY sampling multiplier (default: %.1f, 0.0 = disabled)", (double)params.sampling.dry_multiplier),
        [](common_params & params, const std::string & value) {
            params.sampling.dry_multiplier = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-base"}, "N",
        string_format("set DRY sampling base value (default: %.2f)", (double)params.sampling.dry_base),
        [](common_params & params, const std::string & value) {
            float potential_base = std::stof(value);
            if (potential_base >= 1.0f) {
                params.sampling.dry_base = potential_base;
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-allowed-length"}, "N",
        string_format("set allowed length for DRY sampling (default: %d)", params.sampling.dry_allowed_length),
        [](common_params & params, int value) {
            params.sampling.dry_allowed_length = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-penalty-last-n"}, "N",
        string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sampling.dry_penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid dry-penalty-last-n = %d\n", value));
            }
            params.sampling.dry_penalty_last_n = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-sequence-breaker"}, "STRING",
        string_format("add sequence breaker for DRY sampling, clearing out default breakers (%s) in the process; use \"none\" to not use any sequence breakers\n",
            params.sampling.dry_sequence_breakers.empty() ? "none" :
                std::accumulate(std::next(params.sampling.dry_sequence_breakers.begin()),
                    params.sampling.dry_sequence_breakers.end(),
                    std::string("'") + (params.sampling.dry_sequence_breakers[0] == "\n" ? "\\n" : params.sampling.dry_sequence_breakers[0]) + "'",
                    [](const std::string & a, const std::string & b) {
                        std::string formatted_b = (b == "\n") ? "\\n" : b;
                        return a + ", '" + formatted_b + "'";
                    }).c_str()),
        [](common_params & params, const std::string & value) {
            static bool defaults_cleared = false;
            if (!defaults_cleared) {
                params.sampling.dry_sequence_breakers.clear();
                defaults_cleared = true;
            }
            if (value == "none") {
                params.sampling.dry_sequence_breakers.clear();
            } else {
                params.sampling.dry_sequence_breakers.emplace_back(value);
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dynatemp-range"}, "N",
        string_format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sampling.dynatemp_range),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_range = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dynatemp-exp"}, "N",
        string_format("dynamic temperature exponent (default: %.1f)", (double)params.sampling.dynatemp_exponent),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_exponent = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat"}, "N",
        string_format("use Mirostat sampling.\nTop K, Nucleus and Locally Typical samplers are ignored if used.\n"
            "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sampling.mirostat),
        [](common_params & params, int value) {
            params.sampling.mirostat = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-lr"}, "N",
        string_format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sampling.mirostat_eta),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_eta = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-ent"}, "N",
        string_format("Mirostat target entropy, parameter tau (default: %.1f)", (double)params.sampling.mirostat_tau),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_tau = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-l", "--logit-bias"}, "TOKEN_ID(+/-)BIAS",
        "modifies the likelihood of token appearing in the completion,\n"
        "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
        "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'",
        [](common_params & params, const std::string & value) {
            std::stringstream ss(value);
            llama_token key;
            char sign;
            std::string value_str;
            try {
                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
                    const float bias = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
                    params.sampling.logit_bias.push_back({key, bias});
                } else {
                    throw std::invalid_argument("invalid input format");
                }
            } catch (const std::exception &) {
                throw std::invalid_argument("invalid input format");
            }
        }
    ).set_sparam());
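    // Parsing note for --logit-bias: "15043+1" splits into key 15043 (the int
    // extraction stops at the sign), sign '+', remainder "1", giving bias +1.0;
    // a missing sign or non-numeric remainder lands in the catch block and is
    // rejected with "invalid input format".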
  1059. add_opt(common_arg(
  1060. {"--grammar"}, "GRAMMAR",
  1061. string_format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sampling.grammar.c_str()),
  1062. [](common_params & params, const std::string & value) {
  1063. params.sampling.grammar = value;
  1064. }
  1065. ).set_sparam());
  1066. add_opt(common_arg(
  1067. {"--grammar-file"}, "FNAME",
  1068. "file to read grammar from",
  1069. [](common_params & params, const std::string & value) {
  1070. std::ifstream file(value);
  1071. if (!file) {
  1072. throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
  1073. }
  1074. std::copy(
  1075. std::istreambuf_iterator<char>(file),
  1076. std::istreambuf_iterator<char>(),
  1077. std::back_inserter(params.sampling.grammar)
  1078. );
  1079. }
  1080. ).set_sparam());
  1081. add_opt(common_arg(
  1082. {"-j", "--json-schema"}, "SCHEMA",
  1083. "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead",
  1084. [](common_params & params, const std::string & value) {
  1085. params.sampling.grammar = json_schema_to_grammar(json::parse(value));
  1086. }
  1087. ).set_sparam());
  1088. add_opt(common_arg(
  1089. {"--pooling"}, "{none,mean,cls,last,rank}",
  1090. "pooling type for embeddings, use model default if unspecified",
  1091. [](common_params & params, const std::string & value) {
  1092. /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
  1093. else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
  1094. else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
  1095. else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; }
  1096. else if (value == "rank") { params.pooling_type = LLAMA_POOLING_TYPE_RANK; }
  1097. else { throw std::invalid_argument("invalid value"); }
  1098. }
  1099. ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_POOLING"));
  1100. add_opt(common_arg(
  1101. {"--attention"}, "{causal,non-causal}",
  1102. "attention type for embeddings, use model default if unspecified",
  1103. [](common_params & params, const std::string & value) {
  1104. /**/ if (value == "causal") { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
  1105. else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
  1106. else { throw std::invalid_argument("invalid value"); }
  1107. }
  1108. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
  1109. add_opt(common_arg(
  1110. {"--rope-scaling"}, "{none,linear,yarn}",
  1111. "RoPE frequency scaling method, defaults to linear unless specified by the model",
  1112. [](common_params & params, const std::string & value) {
  1113. /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
  1114. else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
  1115. else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
  1116. else { throw std::invalid_argument("invalid value"); }
  1117. }
  1118. ).set_env("LLAMA_ARG_ROPE_SCALING_TYPE"));
  1119. add_opt(common_arg(
  1120. {"--rope-scale"}, "N",
  1121. "RoPE context scaling factor, expands context by a factor of N",
  1122. [](common_params & params, const std::string & value) {
  1123. params.rope_freq_scale = 1.0f / std::stof(value);
  1124. }
  1125. ).set_env("LLAMA_ARG_ROPE_SCALE"));
  1126. add_opt(common_arg(
  1127. {"--rope-freq-base"}, "N",
  1128. "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)",
  1129. [](common_params & params, const std::string & value) {
  1130. params.rope_freq_base = std::stof(value);
  1131. }
  1132. ).set_env("LLAMA_ARG_ROPE_FREQ_BASE"));
  1133. add_opt(common_arg(
  1134. {"--rope-freq-scale"}, "N",
  1135. "RoPE frequency scaling factor, expands context by a factor of 1/N",
  1136. [](common_params & params, const std::string & value) {
  1137. params.rope_freq_scale = std::stof(value);
  1138. }
  1139. ).set_env("LLAMA_ARG_ROPE_FREQ_SCALE"));
    add_opt(common_arg(
        {"--yarn-orig-ctx"}, "N",
        string_format("YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx),
        [](common_params & params, int value) {
            params.yarn_orig_ctx = value;
        }
    ).set_env("LLAMA_ARG_YARN_ORIG_CTX"));
    add_opt(common_arg(
        {"--yarn-ext-factor"}, "N",
        string_format("YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_ext_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_EXT_FACTOR"));
    add_opt(common_arg(
        {"--yarn-attn-factor"}, "N",
        string_format("YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_attn_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_ATTN_FACTOR"));
    add_opt(common_arg(
        {"--yarn-beta-slow"}, "N",
        string_format("YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_slow = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_SLOW"));
    add_opt(common_arg(
        {"--yarn-beta-fast"}, "N",
        string_format("YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_fast = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_FAST"));
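    // group-attention (-gan/-gaw) enables self-extend style context extension
    // (assumption: as used by the passkey example): n is the grouping factor and
    // w the window width over which positions are grouped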
    add_opt(common_arg(
        {"-gan", "--grp-attn-n"}, "N",
        string_format("group-attention factor (default: %d)", params.grp_attn_n),
        [](common_params & params, int value) {
            params.grp_attn_n = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_N").set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"-gaw", "--grp-attn-w"}, "N",
        string_format("group-attention width (default: %d)", params.grp_attn_w),
        [](common_params & params, int value) {
            params.grp_attn_w = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_W").set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-dkvc", "--dump-kv-cache"},
        "verbose print of the KV cache",
        [](common_params & params) {
            params.dump_kv_cache = true;
        }
    ));
    add_opt(common_arg(
        {"-nkvo", "--no-kv-offload"},
        "disable KV offload",
        [](common_params & params) {
            params.no_kv_offload = true;
        }
    ).set_env("LLAMA_ARG_NO_KV_OFFLOAD"));
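    // K/V cache quantization: smaller cache types trade memory for quality; note that
    // quantizing the V cache typically requires flash attention to be enabled
    // (assumption based on common builds, check docs/ if unsure)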
    add_opt(common_arg(
        {"-ctk", "--cache-type-k"}, "TYPE",
        string_format(
            "KV cache data type for K\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_k)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_k = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_K"));
    add_opt(common_arg(
        {"-ctv", "--cache-type-v"}, "TYPE",
        string_format(
            "KV cache data type for V\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_v)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_v = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_V"));
    add_opt(common_arg(
        {"--perplexity", "--all-logits"},
        string_format("return logits for all tokens in the batch (default: %s)", params.logits_all ? "true" : "false"),
        [](common_params & params) {
            params.logits_all = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--hellaswag"},
        "compute HellaSwag score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.hellaswag = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--hellaswag-tasks"}, "N",
        string_format("number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks),
        [](common_params & params, int value) {
            params.hellaswag_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande"},
        "compute Winogrande score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.winogrande = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande-tasks"}, "N",
        string_format("number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks),
        [](common_params & params, int value) {
            params.winogrande_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice"},
        "compute multiple choice score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.multiple_choice = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice-tasks"}, "N",
        string_format("number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks),
        [](common_params & params, int value) {
            params.multiple_choice_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
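    // KL-divergence is a two-pass workflow: first run with --kl-divergence-base FNAME
    // (alias --save-all-logits) to record logits, then rerun against the same file
    // with --kl-divergence added to compare the distributions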
    add_opt(common_arg(
        {"--kl-divergence"},
        "computes KL-divergence to logits provided via --kl-divergence-base",
        [](common_params & params) {
            params.kl_divergence = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--save-all-logits", "--kl-divergence-base"}, "FNAME",
        "set logits file",
        [](common_params & params, const std::string & value) {
            params.logits_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-stride"}, "N",
        string_format("stride for perplexity calculation (default: %d)", params.ppl_stride),
        [](common_params & params, int value) {
            params.ppl_stride = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-output-type"}, "<0|1>",
        string_format("output type for perplexity calculation (default: %d)", params.ppl_output_type),
        [](common_params & params, int value) {
            params.ppl_output_type = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"-dt", "--defrag-thold"}, "N",
        string_format("KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold),
        [](common_params & params, const std::string & value) {
            params.defrag_thold = std::stof(value);
        }
    ).set_env("LLAMA_ARG_DEFRAG_THOLD"));
    add_opt(common_arg(
        {"-np", "--parallel"}, "N",
        string_format("number of parallel sequences to decode (default: %d)", params.n_parallel),
        [](common_params & params, int value) {
            params.n_parallel = value;
        }
    ).set_env("LLAMA_ARG_N_PARALLEL"));
    add_opt(common_arg(
        {"-ns", "--sequences"}, "N",
        string_format("number of sequences to decode (default: %d)", params.n_sequences),
        [](common_params & params, int value) {
            params.n_sequences = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-cb", "--cont-batching"},
        string_format("enable continuous batching (a.k.a. dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled"),
        [](common_params & params) {
            params.cont_batching = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CONT_BATCHING"));
    add_opt(common_arg(
        {"-nocb", "--no-cont-batching"},
        "disable continuous batching",
        [](common_params & params) {
            params.cont_batching = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
    add_opt(common_arg(
        {"--mmproj"}, "FILE",
        "path to a multimodal projector file for LLaVA. see examples/llava/README.md",
        [](common_params & params, const std::string & value) {
            params.mmproj = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
    add_opt(common_arg(
        {"--image"}, "FILE",
        "path to an image file. use with multimodal models. Specify multiple times for batching",
        [](common_params & params, const std::string & value) {
            params.image.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
    if (llama_supports_rpc()) {
        add_opt(common_arg(
            {"--rpc"}, "SERVERS",
            "comma separated list of RPC servers",
            [](common_params & params, const std::string & value) {
                add_rpc_devices(value);
                GGML_UNUSED(params);
            }
        ).set_env("LLAMA_ARG_RPC"));
    }
    add_opt(common_arg(
        {"--mlock"},
        "force system to keep model in RAM rather than swapping or compressing",
        [](common_params & params) {
            params.use_mlock = true;
        }
    ).set_env("LLAMA_ARG_MLOCK"));
    add_opt(common_arg(
        {"--no-mmap"},
        "do not memory-map model (slower load but may reduce pageouts if not using mlock)",
        [](common_params & params) {
            params.use_mmap = false;
        }
    ).set_env("LLAMA_ARG_NO_MMAP"));
    add_opt(common_arg(
        {"--numa"}, "TYPE",
        "attempt optimizations that help on some NUMA systems\n"
        "- distribute: spread execution evenly over all nodes\n"
        "- isolate: only spawn threads on CPUs on the node that execution started on\n"
        "- numactl: use the CPU map provided by numactl\n"
        "if the program was run without this option previously, it is recommended to drop the system page cache before using it\n"
        "see https://github.com/ggerganov/llama.cpp/issues/1437",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_NUMA"));
    add_opt(common_arg(
        {"-dev", "--device"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.devices = parse_device_list(value);
        }
    ).set_env("LLAMA_ARG_DEVICE"));
    add_opt(common_arg(
        {"--list-devices"},
        "print list of available devices and exit",
        [](common_params &) {
            printf("Available devices:\n");
            for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                auto * dev = ggml_backend_dev_get(i);
                if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
                    size_t free, total;
                    ggml_backend_dev_memory(dev, &free, &total);
                    printf(" %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024);
                }
            }
            exit(0);
        }
    ));
    add_opt(common_arg(
        {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
        "number of layers to store in VRAM",
        [](common_params & params, int value) {
            params.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
    add_opt(common_arg(
        {"-sm", "--split-mode"}, "{none,layer,row}",
        "how to split the model across multiple GPUs, one of:\n"
        "- none: use one GPU only\n"
        "- layer (default): split layers and KV across GPUs\n"
        "- row: split rows across GPUs",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            if (arg_next == "none") {
                params.split_mode = LLAMA_SPLIT_MODE_NONE;
            } else if (arg_next == "layer") {
                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
            } else if (arg_next == "row") {
                params.split_mode = LLAMA_SPLIT_MODE_ROW;
            } else {
                throw std::invalid_argument("invalid value");
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the split mode has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_SPLIT_MODE"));
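    // example: --tensor-split 3,1 assigns ~75% of the layers to device 0 and ~25% to
    // device 1; both ',' and '/' are accepted as separators (see the regex below)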
    add_opt(common_arg(
        {"-ts", "--tensor-split"}, "N0,N1,N2,...",
        "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            // split string by , and /
            const std::regex regex{ R"([,/]+)" };
            std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
            std::vector<std::string> split_arg{ it, {} };
            if (split_arg.size() > llama_max_devices()) {
                throw std::invalid_argument(
                    string_format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)llama_max_devices())
                );
            }
            for (size_t i = 0; i < llama_max_devices(); ++i) {
                if (i < split_arg.size()) {
                    params.tensor_split[i] = std::stof(split_arg[i]);
                } else {
                    params.tensor_split[i] = 0.0f;
                }
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting a tensor split has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_TENSOR_SPLIT"));
    add_opt(common_arg(
        {"-mg", "--main-gpu"}, "INDEX",
        string_format("the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu),
        [](common_params & params, int value) {
            params.main_gpu = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the main GPU has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_MAIN_GPU"));
    add_opt(common_arg(
        {"--check-tensors"},
        string_format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"),
        [](common_params & params) {
            params.check_tensors = true;
        }
    ));
    add_opt(common_arg(
        {"--override-kv"}, "KEY=TYPE:VALUE",
        "advanced option to override model metadata by key. may be specified multiple times.\n"
        "types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false",
        [](common_params & params, const std::string & value) {
            if (!string_parse_kv_override(value.c_str(), params.kv_overrides)) {
                throw std::runtime_error(string_format("error: Invalid type for KV override: %s\n", value.c_str()));
            }
        }
    ));
    add_opt(common_arg(
        {"--lora"}, "FNAME",
        "path to LoRA adapter (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & value) {
            params.lora_adapters.push_back({ std::string(value), 1.0, nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"--lora-scaled"}, "FNAME", "SCALE",
        "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.lora_adapters.push_back({ fname, std::stof(scale), nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"--control-vector"}, "FNAME",
        "add a control vector\nnote: this argument can be repeated to add multiple control vectors",
        [](common_params & params, const std::string & value) {
            params.control_vectors.push_back({ 1.0f, value, });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-scaled"}, "FNAME", "SCALE",
        "add a control vector with user defined scaling SCALE\n"
        "note: this argument can be repeated to add multiple scaled control vectors",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.control_vectors.push_back({ std::stof(scale), fname });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-layer-range"}, "START", "END",
        "layer range to apply the control vector(s) to, start and end inclusive",
        [](common_params & params, const std::string & start, const std::string & end) {
            params.control_vector_layer_start = std::stoi(start);
            params.control_vector_layer_end = std::stoi(end);
        }
    ));
    add_opt(common_arg(
        {"-a", "--alias"}, "STRING",
        "set alias for model name (to be used by REST API)",
        [](common_params & params, const std::string & value) {
            params.model_alias = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ALIAS"));
    add_opt(common_arg(
        {"-m", "--model"}, "FNAME",
        ex == LLAMA_EXAMPLE_EXPORT_LORA
            ? std::string("model path from which to load base model")
            : string_format(
                "model path (default: `models/$filename` with filename from `--hf-file` "
                "or `--model-url` if set, otherwise %s)", DEFAULT_MODEL_PATH
            ),
        [](common_params & params, const std::string & value) {
            params.model = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL"));
    add_opt(common_arg(
        {"-mu", "--model-url"}, "MODEL_URL",
        "model download url (default: unused)",
        [](common_params & params, const std::string & value) {
            params.model_url = value;
        }
    ).set_env("LLAMA_ARG_MODEL_URL"));
    add_opt(common_arg(
        {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]",
        "Hugging Face model repository; quant is optional, case-insensitive, defaults to Q4_K_M, and falls back to the first file in the repo if Q4_K_M doesn't exist.\n"
        "example: unsloth/phi-4-GGUF:q4_k_m\n"
        "(default: unused)",
        [](common_params & params, const std::string & value) {
            params.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO"));
    add_opt(common_arg(
        {"-hfd", "-hfrd", "--hf-repo-draft"}, "<user>/<model>[:quant]",
        "Same as --hf-repo, but for the draft model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HFD_REPO"));
    add_opt(common_arg(
        {"-hff", "--hf-file"}, "FILE",
        "Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)",
        [](common_params & params, const std::string & value) {
            params.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE"));
    add_opt(common_arg(
        {"-hfv", "-hfrv", "--hf-repo-v"}, "<user>/<model>[:quant]",
        "Hugging Face model repository for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO_V"));
    add_opt(common_arg(
        {"-hffv", "--hf-file-v"}, "FILE",
        "Hugging Face model file for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE_V"));
    add_opt(common_arg(
        {"-hft", "--hf-token"}, "TOKEN",
        "Hugging Face access token (default: value from HF_TOKEN environment variable)",
        [](common_params & params, const std::string & value) {
            params.hf_token = value;
        }
    ).set_env("HF_TOKEN"));
    add_opt(common_arg(
        {"--context-file"}, "FNAME",
        "file to load context from (repeat to specify multiple files)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.context_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-size"}, "N",
        string_format("minimum length of embedded text chunks (default: %d)", params.chunk_size),
        [](common_params & params, int value) {
            params.chunk_size = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-separator"}, "STRING",
        string_format("separator between chunks (default: '%s')", params.chunk_separator.c_str()),
        [](common_params & params, const std::string & value) {
            params.chunk_separator = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--junk"}, "N",
        string_format("number of times to repeat the junk text (default: %d)", params.n_junk),
        [](common_params & params, int value) {
            params.n_junk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"--pos"}, "N",
        string_format("position of the passkey in the junk text (default: %d)", params.i_pos),
        [](common_params & params, int value) {
            params.i_pos = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
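    // the single -o handler below fills all three output fields; only the field read
    // by the active example (imatrix / cvector-generator / export-lora) takes effect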
    add_opt(common_arg(
        {"-o", "--output", "--output-file"}, "FNAME",
        string_format("output file (default: '%s')",
            ex == LLAMA_EXAMPLE_EXPORT_LORA
                ? params.lora_outfile.c_str()
                : ex == LLAMA_EXAMPLE_CVECTOR_GENERATOR
                    ? params.cvector_outfile.c_str()
                    : params.out_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.out_file = value;
            params.cvector_outfile = value;
            params.lora_outfile = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"-ofreq", "--output-frequency"}, "N",
        string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
        [](common_params & params, int value) {
            params.n_out_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--save-frequency"}, "N",
        string_format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq),
        [](common_params & params, int value) {
            params.n_save_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--process-output"},
        string_format("collect data for the output tensor (default: %s)", params.process_output ? "true" : "false"),
        [](common_params & params) {
            params.process_output = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--no-ppl"},
        string_format("do not compute perplexity (default: %s)", params.compute_ppl ? "true" : "false"),
        [](common_params & params) {
            params.compute_ppl = false;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--chunk", "--from-chunk"}, "N",
        string_format("start processing the input from chunk N (default: %d)", params.i_chunk),
        [](common_params & params, int value) {
            params.i_chunk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"-pps"},
        string_format("whether the prompt is shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false"),
        [](common_params & params) {
            params.is_pp_shared = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-npp"}, "n0,n1,...",
        "number of prompt tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-ntg"}, "n0,n1,...",
        "number of text generation tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-npl"}, "n0,n1,...",
        "number of parallel prompts",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
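    // embedding normalization (--embd-normalize): for p > 0 each component is scaled
    // as x_i <- x_i / (sum_j |x_j|^p)^(1/p), so 1 = taxicab and 2 = euclidean;
    // 0 scales by the maximum absolute component (int16 range), -1 leaves raw values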
    add_opt(common_arg(
        {"--embd-normalize"}, "N",
        string_format("normalisation for embeddings (default: %d) (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)", params.embd_normalize),
        [](common_params & params, int value) {
            params.embd_normalize = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--embd-output-format"}, "FORMAT",
        "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix",
        [](common_params & params, const std::string & value) {
            params.embd_out = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--embd-separator"}, "STRING",
        "separator of embeddings (default: \\n), for example \"<#sep#>\"",
        [](common_params & params, const std::string & value) {
            params.embd_sep = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--host"}, "HOST",
        string_format("ip address to listen on (default: %s)", params.hostname.c_str()),
        [](common_params & params, const std::string & value) {
            params.hostname = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_HOST"));
    add_opt(common_arg(
        {"--port"}, "PORT",
        string_format("port to listen on (default: %d)", params.port),
        [](common_params & params, int value) {
            params.port = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_PORT"));
    add_opt(common_arg(
        {"--path"}, "PATH",
        string_format("path to serve static files from (default: %s)", params.public_path.c_str()),
        [](common_params & params, const std::string & value) {
            params.public_path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH"));
    add_opt(common_arg(
        {"--no-webui"},
        string_format("Disable the Web UI (default: %s)", params.webui ? "enabled" : "disabled"),
        [](common_params & params) {
            params.webui = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_WEBUI"));
    add_opt(common_arg(
        {"--embedding", "--embeddings"},
        string_format("restrict to only support embedding use case; use only with dedicated embedding models (default: %s)", params.embedding ? "enabled" : "disabled"),
        [](common_params & params) {
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS"));
    add_opt(common_arg(
        {"--reranking", "--rerank"},
        string_format("enable reranking endpoint on server (default: %s)", params.reranking ? "enabled" : "disabled"),
        [](common_params & params) {
            params.reranking = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING"));
    add_opt(common_arg(
        {"--api-key"}, "KEY",
        "API key to use for authentication (default: none)",
        [](common_params & params, const std::string & value) {
            params.api_keys.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_API_KEY"));
    add_opt(common_arg(
        {"--api-key-file"}, "FNAME",
        "path to file containing API keys (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream key_file(value);
            if (!key_file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (!key.empty()) {
                    params.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--ssl-key-file"}, "FNAME",
        "path to a file containing a PEM-encoded SSL private key",
        [](common_params & params, const std::string & value) {
            params.ssl_file_key = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_KEY_FILE"));
    add_opt(common_arg(
        {"--ssl-cert-file"}, "FNAME",
        "path to a file containing a PEM-encoded SSL certificate",
        [](common_params & params, const std::string & value) {
            params.ssl_file_cert = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_CERT_FILE"));
    add_opt(common_arg(
        {"-to", "--timeout"}, "N",
        string_format("server read/write timeout in seconds (default: %d)", params.timeout_read),
        [](common_params & params, int value) {
            params.timeout_read = value;
            params.timeout_write = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_TIMEOUT"));
    add_opt(common_arg(
        {"--threads-http"}, "N",
        string_format("number of threads used to process HTTP requests (default: %d)", params.n_threads_http),
        [](common_params & params, int value) {
            params.n_threads_http = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
    add_opt(common_arg(
        {"--cache-reuse"}, "N",
        string_format("min chunk size to attempt reusing from the cache via KV shifting (default: %d)", params.n_cache_reuse),
        [](common_params & params, int value) {
            params.n_cache_reuse = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE"));
    add_opt(common_arg(
        {"--metrics"},
        string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_metrics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
    add_opt(common_arg(
        {"--slots"},
        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_slots = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--props"},
        string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_props = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS"));
    add_opt(common_arg(
        {"--no-slots"},
        "disable slots monitoring endpoint",
        [](common_params & params) {
            params.endpoint_slots = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--slot-save-path"}, "PATH",
        "path to save slot kv cache (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.slot_save_path = value;
            // if doesn't end with DIRECTORY_SEPARATOR, add it
            if (!params.slot_save_path.empty() && params.slot_save_path[params.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
                params.slot_save_path += DIRECTORY_SEPARATOR;
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--chat-template"}, "JINJA_TEMPLATE",
        string_format(
            "set custom jinja chat template (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            if (!common_chat_verify_template(value)) {
                throw std::runtime_error(string_format(
                    "error: the supplied chat template is not supported: %s\n"
                    "note: llama.cpp does not use a jinja parser, we only support commonly used templates\n",
                    value.c_str()
                ));
            }
            params.chat_template = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
    add_opt(common_arg(
        {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
        string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),
        [](common_params & params, const std::string & value) {
            params.slot_prompt_similarity = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--lora-init-without-apply"},
        string_format("load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? "enabled" : "disabled"),
        [](common_params & params) {
            params.lora_init_without_apply = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--simple-io"},
        "use basic IO for better compatibility in subprocesses and limited consoles",
        [](common_params & params) {
            params.simple_io = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--positive-file"}, "FNAME",
        string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_positive_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--negative-file"}, "FNAME",
        string_format("negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_negative_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-batch"}, "N",
        string_format("batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch),
        [](common_params & params, int value) {
            params.n_pca_batch = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-iter"}, "N",
        string_format("number of iterations used for PCA (default: %d)", params.n_pca_iterations),
        [](common_params & params, int value) {
            params.n_pca_iterations = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--method"}, "{pca, mean}",
        "dimensionality reduction method to be used (default: pca)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "pca") { params.cvector_dimre_method = DIMRE_METHOD_PCA; }
            else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--output-format"}, "{md,jsonl}",
        "output format for batched-bench results (default: md)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "jsonl") { params.batched_bench_output_jsonl = true; }
            else if (value == "md") { params.batched_bench_output_jsonl = false; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"--log-disable"},
        "Disable logging",
        [](common_params &) {
            common_log_pause(common_log_main());
        }
    ));
    add_opt(common_arg(
        {"--log-file"}, "FNAME",
        "Log to file",
        [](common_params &, const std::string & value) {
            common_log_set_file(common_log_main(), value.c_str());
        }
    ));
    add_opt(common_arg(
        {"--log-colors"},
        "Enable colored logging",
        [](common_params &) {
            common_log_set_colors(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_COLORS"));
    add_opt(common_arg(
        {"-v", "--verbose", "--log-verbose"},
        "Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
        [](common_params & params) {
            params.verbosity = INT_MAX;
            common_log_set_verbosity_thold(INT_MAX);
        }
    ));
    add_opt(common_arg(
        {"-lv", "--verbosity", "--log-verbosity"}, "N",
        "Set the verbosity threshold. Messages with a higher verbosity will be ignored.",
        [](common_params & params, int value) {
            params.verbosity = value;
            common_log_set_verbosity_thold(value);
        }
    ).set_env("LLAMA_LOG_VERBOSITY"));
    add_opt(common_arg(
        {"--log-prefix"},
        "Enable prefix in log messages",
        [](common_params &) {
            common_log_set_prefix(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_PREFIX"));
    add_opt(common_arg(
        {"--log-timestamps"},
        "Enable timestamps in log messages",
        [](common_params &) {
            common_log_set_timestamps(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_TIMESTAMPS"));
    // speculative parameters
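    // speculative decoding runs a small draft model ahead of the target model and
    // verifies the drafted tokens in a single batch; the *-draft options below mirror
    // their non-draft counterparts but apply to the draft model's threads/affinity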
    add_opt(common_arg(
        {"-td", "--threads-draft"}, "N",
        "number of threads to use during draft generation (default: same as --threads)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.n_threads = value;
            if (params.speculative.cpuparams.n_threads <= 0) {
                params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-tbd", "--threads-batch-draft"}, "N",
        "number of threads to use during draft batch and prompt processing (default: same as --threads-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.n_threads = value;
            if (params.speculative.cpuparams_batch.n_threads <= 0) {
                params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Cd", "--cpu-mask-draft"}, "M",
        "Draft model CPU affinity mask. Complements --cpu-range-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crd", "--cpu-range-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-draft"}, "N",
        string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: same as --poll)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Cbd", "--cpu-mask-batch-draft"}, "M",
        "Draft model CPU affinity mask for batch processing. Complements --cpu-range-batch-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-batch-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-batch-draft"}, "<0|1>",
        "Use strict CPU placement for draft model batch processing (default: same as --cpu-strict-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-batch-draft"}, "N",
        string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-batch-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: same as --poll-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
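    // illustrative usage (flag names from this file, binary name assumed):
    //   llama-speculative -m target.gguf -md draft.gguf --draft-max 16 --draft-min 4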
    add_opt(common_arg(
        {"--draft-max", "--draft", "--draft-n"}, "N",
        string_format("number of tokens to draft for speculative decoding (default: %d)", params.speculative.n_max),
        [](common_params & params, int value) {
            params.speculative.n_max = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MAX"));
    add_opt(common_arg(
        {"--draft-min", "--draft-n-min"}, "N",
        string_format("minimum number of draft tokens to use for speculative decoding (default: %d)", params.speculative.n_min),
        [](common_params & params, int value) {
            params.speculative.n_min = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MIN"));
    add_opt(common_arg(
        {"--draft-p-split"}, "P",
        string_format("speculative decoding split probability (default: %.1f)", (double)params.speculative.p_split),
        [](common_params & params, const std::string & value) {
            params.speculative.p_split = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}).set_env("LLAMA_ARG_DRAFT_P_SPLIT"));
    add_opt(common_arg(
        {"--draft-p-min"}, "P",
        string_format("minimum speculative decoding probability (greedy) (default: %.1f)", (double)params.speculative.p_min),
        [](common_params & params, const std::string & value) {
            params.speculative.p_min = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_P_MIN"));
    add_opt(common_arg(
        {"-cd", "--ctx-size-draft"}, "N",
        string_format("size of the prompt context for the draft model (default: %d, 0 = loaded from model)", params.speculative.n_ctx),
        [](common_params & params, int value) {
            params.speculative.n_ctx = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CTX_SIZE_DRAFT"));
    add_opt(common_arg(
        {"-devd", "--device-draft"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading the draft model (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.speculative.devices = parse_device_list(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N",
        "number of layers to store in VRAM for the draft model",
        [](common_params & params, int value) {
            params.speculative.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_GPU_LAYERS_DRAFT"));
    add_opt(common_arg(
        {"-md", "--model-draft"}, "FNAME",
        "draft model for speculative decoding (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
    add_opt(common_arg(
        {"-mv", "--model-vocoder"}, "FNAME",
        "vocoder model for audio generation (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-use-guide-tokens"},
        "Use guide tokens to improve TTS word recall",
        [](common_params & params) {
            params.vocoder.use_guide_tokens = true;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    // model-specific
    add_opt(common_arg(
        {"--tts-oute-default"},
        "use default OuteTTS models (note: can download weights from the internet)",
        [](common_params & params) {
            params.hf_repo = "OuteAI/OuteTTS-0.2-500M-GGUF";
            params.hf_file = "OuteTTS-0.2-500M-Q8_0.gguf";
            params.vocoder.hf_repo = "ggml-org/WavTokenizer";
            params.vocoder.hf_file = "WavTokenizer-Large-75-F16.gguf";
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));

    return ctx_arg;
}