  1. #include "arg.h"
  2. #include "chat.h"
  3. #include "common.h"
  4. #include "json-schema-to-grammar.h"
  5. #include "log.h"
  6. #include "sampling.h"
  7. #include "download.h"
  8. // fix problem with std::min and std::max
  9. #if defined(_WIN32)
  10. #define WIN32_LEAN_AND_MEAN
  11. #ifndef NOMINMAX
  12. # define NOMINMAX
  13. #endif
  14. #include <windows.h>
  15. #endif
  16. #define JSON_ASSERT GGML_ASSERT
  17. #include <nlohmann/json.hpp>
  18. #include <algorithm>
  19. #include <climits>
  20. #include <cstdarg>
  21. #include <fstream>
  22. #include <list>
  23. #include <regex>
  24. #include <set>
  25. #include <string>
  26. #include <thread> // for hardware_concurrency
  27. #include <vector>
  28. #ifndef __EMSCRIPTEN__
  29. #ifdef __linux__
  30. #include <linux/limits.h>
  31. #elif defined(_WIN32)
  32. # if !defined(PATH_MAX)
  33. # define PATH_MAX MAX_PATH
  34. # endif
  35. #elif defined(_AIX)
  36. #include <sys/limits.h>
  37. #else
  38. #include <sys/syslimits.h>
  39. #endif
  40. #endif
  41. #define LLAMA_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
  42. using json = nlohmann::ordered_json;
  43. using namespace common_arg_utils;
  44. static std::initializer_list<enum llama_example> mmproj_examples = {
  45. LLAMA_EXAMPLE_MTMD,
  46. LLAMA_EXAMPLE_SERVER,
  47. LLAMA_EXAMPLE_CLI,
  48. };
  49. static std::string read_file(const std::string & fname) {
  50. std::ifstream file(fname);
  51. if (!file) {
  52. throw std::runtime_error(string_format("error: failed to open file '%s'\n", fname.c_str()));
  53. }
  54. std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
  55. file.close();
  56. return content;
  57. }
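
// Illustrative use of read_file (the caller and file name here are hypothetical):
//
//   std::string prompt = read_file("prompt.txt"); // whole file as one string;
//                                                 // throws std::runtime_error if the file cannot be opened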

static const std::vector<common_arg> & get_common_arg_defs() {
    static const std::vector<common_arg> options = [] {
        common_params params;
        auto ctx = common_params_parser_init(params, LLAMA_EXAMPLE_SERVER, nullptr);
        return ctx.options;
    }();
    return options;
}

common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) {
    this->examples = examples;
    return *this;
}

common_arg & common_arg::set_excludes(std::initializer_list<enum llama_example> excludes) {
    this->excludes = excludes;
    return *this;
}

common_arg & common_arg::set_env(const char * env) {
    help = help + "\n(env: " + env + ")";
    this->env = env;
    return *this;
}

common_arg & common_arg::set_sparam() {
    is_sparam = true;
    return *this;
}

bool common_arg::in_example(enum llama_example ex) {
    return examples.find(ex) != examples.end();
}

bool common_arg::is_exclude(enum llama_example ex) {
    return excludes.find(ex) != excludes.end();
}

bool common_arg::get_value_from_env(std::string & output) const {
    if (env == nullptr) return false;
    char * value = std::getenv(env);
    if (value) {
        output = value;
        return true;
    }
    return false;
}

bool common_arg::has_value_from_env() const {
    return env != nullptr && std::getenv(env);
}

static std::vector<std::string> break_str_into_lines(std::string input, size_t max_char_per_line) {
    std::vector<std::string> result;
    std::istringstream iss(input);
    std::string line;
    auto add_line = [&](const std::string & l) {
        if (l.length() <= max_char_per_line) {
            result.push_back(l);
        } else {
            std::istringstream line_stream(l);
            std::string word, current_line;
            while (line_stream >> word) {
                if (current_line.length() + !current_line.empty() + word.length() > max_char_per_line) {
                    if (!current_line.empty()) result.push_back(current_line);
                    current_line = word;
                } else {
                    current_line += (!current_line.empty() ? " " : "") + word;
                }
            }
            if (!current_line.empty()) result.push_back(current_line);
        }
    };
    while (std::getline(iss, line)) {
        add_line(line);
    }
    return result;
}
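
// A sketch of what break_str_into_lines does (values are illustrative):
//
//   break_str_into_lines("one two three", 8) -> {"one two", "three"}
//
// Existing newlines are kept as hard breaks; only over-long lines are
// re-wrapped at word boundaries.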

std::string common_arg::to_string() const {
    // params for printing to console
    const static int n_leading_spaces     = 40;
    const static int n_char_per_line_help = 70; // TODO: detect this based on current console
    std::string leading_spaces(n_leading_spaces, ' ');

    std::ostringstream ss;
    for (const auto arg : args) {
        if (arg == args.front()) {
            if (args.size() == 1) {
                ss << arg;
            } else {
                // the first arg is usually the abbreviation; pad it so the long forms line up
                auto tmp = std::string(arg) + ", ";
                auto spaces = std::string(std::max(0, 7 - (int) tmp.size()), ' ');
                ss << tmp << spaces;
            }
        } else {
            ss << arg << (arg != args.back() ? ", " : "");
        }
    }
    if (value_hint)   ss << " " << value_hint;
    if (value_hint_2) ss << " " << value_hint_2;
    if (ss.tellp() > n_leading_spaces - 3) {
        // current line is too long, add new line
        ss << "\n" << leading_spaces;
    } else {
        // padding between arg and help, same line
        ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
    }
    const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
    for (const auto & line : help_lines) {
        ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
    }
    return ss.str();
}

//
// utils
//

// helper function to parse tensor buffer override strings
static void parse_tensor_buffer_overrides(const std::string & value, std::vector<llama_model_tensor_buft_override> & overrides) {
    std::map<std::string, ggml_backend_buffer_type_t> buft_list;
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
        auto * dev  = ggml_backend_dev_get(i);
        auto * buft = ggml_backend_dev_buffer_type(dev);
        if (buft) {
            buft_list[ggml_backend_buft_name(buft)] = buft;
        }
    }

    for (const auto & override : string_split<std::string>(value, ',')) {
        std::string::size_type pos = override.find('=');
        if (pos == std::string::npos) {
            throw std::invalid_argument("invalid value");
        }
        std::string tensor_name = override.substr(0, pos);
        std::string buffer_type = override.substr(pos + 1);
        if (buft_list.find(buffer_type) == buft_list.end()) {
            printf("Available buffer types:\n");
            for (const auto & it : buft_list) {
                printf("  %s\n", ggml_backend_buft_name(it.second));
            }
            throw std::invalid_argument("unknown buffer type");
        }
        // keep strings alive and avoid leaking memory by storing them in a static vector
        static std::list<std::string> buft_overrides;
        buft_overrides.push_back(tensor_name);
        overrides.push_back({buft_overrides.back().c_str(), buft_list.at(buffer_type)});
    }
}
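
// The override string is a comma-separated list of <tensor name>=<buffer type>
// pairs, e.g. (tensor and backend names here are hypothetical):
//
//   parse_tensor_buffer_overrides("blk.0.ffn_up.weight=CUDA0,blk.1.ffn_up.weight=CPU", overrides);
//
// An unknown buffer type prints the available list and throws std::invalid_argument.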

struct handle_model_result {
    bool found_mmproj = false;
    common_params_model mmproj;
};

static handle_model_result common_params_handle_model(
        struct common_params_model & model,
        const std::string & bearer_token,
        bool offline) {
    handle_model_result result;
    // handle pre-fill default model path and url based on hf_repo and hf_file
    {
        if (!model.docker_repo.empty()) { // handle Docker URLs by resolving them to local paths
            model.path = common_docker_resolve_model(model.docker_repo);
            model.name = model.docker_repo; // set name for consistency
        } else if (!model.hf_repo.empty()) {
            // short-hand to avoid specifying --hf-file -> default it to --model
            if (model.hf_file.empty()) {
                if (model.path.empty()) {
                    auto auto_detected = common_get_hf_file(model.hf_repo, bearer_token, offline);
                    if (auto_detected.repo.empty() || auto_detected.ggufFile.empty()) {
                        exit(1); // built without CURL, error message already printed
                    }
                    model.name    = model.hf_repo;      // repo name with tag
                    model.hf_repo = auto_detected.repo; // repo name without tag
                    model.hf_file = auto_detected.ggufFile;
                    if (!auto_detected.mmprojFile.empty()) {
                        result.found_mmproj   = true;
                        result.mmproj.hf_repo = model.hf_repo;
                        result.mmproj.hf_file = auto_detected.mmprojFile;
                    }
                } else {
                    model.hf_file = model.path;
                }
            }
            std::string model_endpoint = get_model_endpoint();
            model.url = model_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
            // make sure model path is present (for caching purposes)
            if (model.path.empty()) {
                // this avoids different repos having the same file name, or the same file name in different subdirs
                std::string filename = model.hf_repo + "_" + model.hf_file;
                // make sure we don't have any slashes in the filename
                string_replace_all(filename, "/", "_");
                model.path = fs_get_cache_file(filename);
            }
        } else if (!model.url.empty()) {
            if (model.path.empty()) {
                auto f = string_split<std::string>(model.url, '#').front();
                f = string_split<std::string>(f, '?').front();
                model.path = fs_get_cache_file(string_split<std::string>(f, '/').back());
            }
        }
    }

    // then, download it if needed
    if (!model.url.empty()) {
        bool ok = common_download_model(model, bearer_token, offline);
        if (!ok) {
            LOG_ERR("error: failed to download model from %s\n", model.url.c_str());
            exit(1);
        }
    }

    return result;
}
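
// Illustrative resolution of the -hf shorthand (repo and file names are hypothetical):
// given hf_repo = "user/repo" and hf_file = "model.gguf", the code above derives
//
//   model.url  = "<model endpoint>user/repo/resolve/main/model.gguf"
//   model.path = fs_get_cache_file("user_repo_model.gguf") // slashes replaced by '_'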

const std::vector<ggml_type> kv_cache_types = {
    GGML_TYPE_F32,
    GGML_TYPE_F16,
    GGML_TYPE_BF16,
    GGML_TYPE_Q8_0,
    GGML_TYPE_Q4_0,
    GGML_TYPE_Q4_1,
    GGML_TYPE_IQ4_NL,
    GGML_TYPE_Q5_0,
    GGML_TYPE_Q5_1,
};

static ggml_type kv_cache_type_from_str(const std::string & s) {
    for (const auto & type : kv_cache_types) {
        if (ggml_type_name(type) == s) {
            return type;
        }
    }
    throw std::runtime_error("Unsupported cache type: " + s);
}

static std::string get_all_kv_cache_types() {
    std::ostringstream msg;
    for (const auto & type : kv_cache_types) {
        msg << ggml_type_name(type) << (&type == &kv_cache_types.back() ? "" : ", ");
    }
    return msg.str();
}
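
// Illustrative lookups (the exact names come from ggml_type_name()):
//
//   kv_cache_type_from_str("q8_0") -> GGML_TYPE_Q8_0
//   kv_cache_type_from_str("q4_2") -> throws std::runtime_error (not in kv_cache_types)
//   get_all_kv_cache_types()       -> a comma-separated list such as "f32, f16, bf16, q8_0, ..."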

//
// CLI argument parsing functions
//

static bool common_params_parse_ex(int argc, char ** argv, common_params_context & ctx_arg) {
    common_params & params = ctx_arg.params;
    std::unordered_map<std::string, common_arg *> arg_to_options;
    for (auto & opt : ctx_arg.options) {
        for (const auto & arg : opt.args) {
            arg_to_options[arg] = &opt;
        }
    }

    // handle environment variables
    for (auto & opt : ctx_arg.options) {
        std::string value;
        if (opt.get_value_from_env(value)) {
            try {
                if (opt.handler_void && (value == "1" || value == "true")) {
                    opt.handler_void(params);
                }
                if (opt.handler_int) {
                    opt.handler_int(params, std::stoi(value));
                }
                if (opt.handler_string) {
                    opt.handler_string(params, value);
                    continue;
                }
            } catch (std::exception & e) {
                throw std::invalid_argument(string_format(
                    "error while handling environment variable \"%s\": %s\n\n", opt.env, e.what()));
            }
        }
    }

    // handle command line arguments
    auto check_arg = [&](int i) {
        if (i+1 >= argc) {
            throw std::invalid_argument("expected value for argument");
        }
    };

    for (int i = 1; i < argc; i++) {
        const std::string arg_prefix = "--";

        std::string arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }
        if (arg_to_options.find(arg) == arg_to_options.end()) {
            throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
        }
        auto opt = *arg_to_options[arg];
        if (opt.has_value_from_env()) {
            fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
        }
        try {
            if (opt.handler_void) {
                opt.handler_void(params);
                continue;
            }

            // arg with single value
            check_arg(i);
            std::string val = argv[++i];
            if (opt.handler_int) {
                opt.handler_int(params, std::stoi(val));
                continue;
            }
            if (opt.handler_string) {
                opt.handler_string(params, val);
                continue;
            }

            // arg with 2 values
            check_arg(i);
            std::string val2 = argv[++i];
            if (opt.handler_str_str) {
                opt.handler_str_str(params, val, val2);
                continue;
            }
        } catch (std::exception & e) {
            throw std::invalid_argument(string_format(
                "error while handling argument \"%s\": %s\n\n"
                "usage:\n%s\n\nto show complete usage, run with -h",
                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
        }
    }

    postprocess_cpu_params(params.cpuparams,                  nullptr);
    postprocess_cpu_params(params.cpuparams_batch,            &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams,      &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams_batch, &params.cpuparams_batch);

    if (params.prompt_cache_all && (params.interactive || params.interactive_first)) {
        throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
    }

    // handle model and download
    {
        auto res = common_params_handle_model(params.model, params.hf_token, params.offline);
        if (params.no_mmproj) {
            params.mmproj = {};
        } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) {
            // optionally, handle mmproj model when -hf is specified
            params.mmproj = res.mmproj;
        }
        // only download mmproj if the current example is using it
        for (auto & ex : mmproj_examples) {
            if (ctx_arg.ex == ex) {
                common_params_handle_model(params.mmproj, params.hf_token, params.offline);
                break;
            }
        }
        common_params_handle_model(params.speculative.model, params.hf_token, params.offline);
        common_params_handle_model(params.vocoder.model,     params.hf_token, params.offline);
    }

    // model is required (except for server)
    // TODO @ngxson : maybe show a list of available models in CLI in this case
    if (params.model.path.empty() && ctx_arg.ex != LLAMA_EXAMPLE_SERVER && !params.usage) {
        throw std::invalid_argument("error: --model is required\n");
    }

    if (params.escape) {
        string_process_escapes(params.prompt);
        string_process_escapes(params.input_prefix);
        string_process_escapes(params.input_suffix);
        for (auto & antiprompt : params.antiprompt) {
            string_process_escapes(antiprompt);
        }
        for (auto & seq_breaker : params.sampling.dry_sequence_breakers) {
            string_process_escapes(seq_breaker);
        }
        for (auto & pair : params.speculative.replacements) {
            string_process_escapes(pair.first);
            string_process_escapes(pair.second);
        }
    }

    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
    }

    if (!params.tensor_buft_overrides.empty()) {
        params.tensor_buft_overrides.push_back({nullptr, nullptr});
    }
    if (!params.speculative.tensor_buft_overrides.empty()) {
        params.speculative.tensor_buft_overrides.push_back({nullptr, nullptr});
    }

    if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) {
        throw std::runtime_error(string_format(
            "error: the supplied chat template is not supported: %s%s\n",
            params.chat_template.c_str(),
            params.use_jinja ? "" : "\nnote: llama.cpp was started without --jinja, so only commonly used templates are supported"
        ));
    }

    common_log_set_verbosity_thold(params.verbosity);

    return true;
}
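
// A sketch of how the parser above consumes argv (flag names are real, values hypothetical):
//
//   {"prog", "--ctx_size", "4096"} // "--ctx_size" is normalized to "--ctx-size",
//                                  // then its int handler receives 4096
//   {"prog", "--ctx-size"}         // throws: "expected value for argument"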

static void common_params_print_usage(common_params_context & ctx_arg) {
    auto print_options = [](std::vector<common_arg *> & options) {
        for (common_arg * opt : options) {
            printf("%s", opt->to_string().c_str());
        }
    };

    std::vector<common_arg *> common_options;
    std::vector<common_arg *> sparam_options;
    std::vector<common_arg *> specific_options;
    for (auto & opt : ctx_arg.options) {
        // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching the current example
        if (opt.is_sparam) {
            sparam_options.push_back(&opt);
        } else if (opt.in_example(ctx_arg.ex)) {
            specific_options.push_back(&opt);
        } else {
            common_options.push_back(&opt);
        }
    }
    printf("----- common params -----\n\n");
    print_options(common_options);
    printf("\n\n----- sampling params -----\n\n");
    print_options(sparam_options);
    // TODO: maybe convert enum llama_example to string
    printf("\n\n----- example-specific params -----\n\n");
    print_options(specific_options);
}

static void common_params_print_completion(common_params_context & ctx_arg) {
    std::vector<common_arg *> common_options;
    std::vector<common_arg *> sparam_options;
    std::vector<common_arg *> specific_options;
    for (auto & opt : ctx_arg.options) {
        if (opt.is_sparam) {
            sparam_options.push_back(&opt);
        } else if (opt.in_example(ctx_arg.ex)) {
            specific_options.push_back(&opt);
        } else {
            common_options.push_back(&opt);
        }
    }

    printf("_llama_completions() {\n");
    printf("    local cur prev opts\n");
    printf("    COMPREPLY=()\n");
    printf("    cur=\"${COMP_WORDS[COMP_CWORD]}\"\n");
    printf("    prev=\"${COMP_WORDS[COMP_CWORD-1]}\"\n\n");

    printf("    opts=\"");
    auto print_options = [](const std::vector<common_arg *> & options) {
        for (const common_arg * opt : options) {
            for (const char * arg : opt->args) {
                printf("%s ", arg);
            }
        }
    };
    print_options(common_options);
    print_options(sparam_options);
    print_options(specific_options);
    printf("\"\n\n");

    printf("    case \"$prev\" in\n");
    printf("        --model|-m)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        --grammar-file)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.gbnf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        --chat-template-file)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.jinja' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        *)\n");
    printf("            COMPREPLY=( $(compgen -W \"${opts}\" -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("    esac\n");
    printf("}\n\n");

    std::set<std::string> executables = {
        "llama-batched",
        "llama-batched-bench",
        "llama-bench",
        "llama-cli",
        "llama-convert-llama2c-to-ggml",
        "llama-cvector-generator",
        "llama-embedding",
        "llama-eval-callback",
        "llama-export-lora",
        "llama-gen-docs",
        "llama-gguf",
        "llama-gguf-hash",
        "llama-gguf-split",
        "llama-gritlm",
        "llama-imatrix",
        "llama-infill",
        "llama-mtmd-cli",
        "llama-llava-clip-quantize-cli",
        "llama-lookahead",
        "llama-lookup",
        "llama-lookup-create",
        "llama-lookup-merge",
        "llama-lookup-stats",
        "llama-parallel",
        "llama-passkey",
        "llama-perplexity",
        "llama-q8dot",
        "llama-quantize",
        "llama-qwen2vl-cli",
        "llama-retrieval",
        "llama-run",
        "llama-save-load-state",
        "llama-server",
        "llama-simple",
        "llama-simple-chat",
        "llama-speculative",
        "llama-speculative-simple",
        "llama-tokenize",
        "llama-tts",
        "llama-vdot"
    };

    for (const auto & exe : executables) {
        printf("complete -F _llama_completions %s\n", exe.c_str());
    }
}
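
// Illustrative way to use the generated script (shell commands, not part of this file):
//
//   $ llama-cli --completion-bash > llama.completion
//   $ source llama.completion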
  552. static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
  553. std::vector<ggml_backend_dev_t> devices;
  554. auto dev_names = string_split<std::string>(value, ',');
  555. if (dev_names.empty()) {
  556. throw std::invalid_argument("no devices specified");
  557. }
  558. if (dev_names.size() == 1 && dev_names[0] == "none") {
  559. devices.push_back(nullptr);
  560. } else {
  561. for (const auto & device : dev_names) {
  562. auto * dev = ggml_backend_dev_by_name(device.c_str());
  563. if (!dev || ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
  564. throw std::invalid_argument(string_format("invalid device: %s", device.c_str()));
  565. }
  566. devices.push_back(dev);
  567. }
  568. devices.push_back(nullptr);
  569. }
  570. return devices;
  571. }
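
// Illustrative inputs (device names are backend-dependent and hypothetical here):
//
//   parse_device_list("CUDA0,CUDA1") -> {dev0, dev1, nullptr} // nullptr terminates the list
//   parse_device_list("none")        -> {nullptr}             // explicitly no devices
//   parse_device_list("CPU")         -> throws std::invalid_argument (CPU devices are rejected)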

static void add_rpc_devices(const std::string & servers) {
    auto rpc_servers = string_split<std::string>(servers, ',');
    if (rpc_servers.empty()) {
        throw std::invalid_argument("no RPC servers specified");
    }
    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
    if (!rpc_reg) {
        throw std::invalid_argument("failed to find RPC backend");
    }
    typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char * endpoint);
    ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server");
    if (!ggml_backend_rpc_add_server_fn) {
        throw std::invalid_argument("failed to find RPC add server function");
    }
    for (const auto & server : rpc_servers) {
        auto reg = ggml_backend_rpc_add_server_fn(server.c_str());
        ggml_backend_register(reg);
    }
}
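
// Illustrative call (endpoints are hypothetical host:port pairs):
//
//   add_rpc_devices("192.168.1.10:50052,192.168.1.11:50052");
//
// Each endpoint is registered as an additional ggml backend via the RPC
// backend's "ggml_backend_rpc_add_server" entry point looked up above.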

bool common_params_parse(int argc, char ** argv, llama_example ex, std::map<common_arg, std::string> & out_map) {
    common_params dummy_params;
    common_params_context ctx_arg = common_params_parser_init(dummy_params, ex, nullptr);
    std::unordered_map<std::string, common_arg *> arg_to_options;
    for (auto & opt : ctx_arg.options) {
        for (const auto & arg : opt.args) {
            arg_to_options[arg] = &opt;
        }
    }

    // TODO @ngxson : find a way to deduplicate this code
    // handle command line arguments
    auto check_arg = [&](int i) {
        if (i+1 >= argc) {
            throw std::invalid_argument("expected value for argument");
        }
    };

    for (int i = 1; i < argc; i++) {
        const std::string arg_prefix = "--";

        std::string arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }
        if (arg_to_options.find(arg) == arg_to_options.end()) {
            throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
        }
        auto opt = *arg_to_options[arg];
        std::string val;
        if (opt.value_hint != nullptr) {
            // arg with single value
            check_arg(i);
            val = argv[++i];
        }
        if (opt.value_hint_2 != nullptr) {
            // TODO: support arg with 2 values
            throw std::invalid_argument("error: argument with 2 values is not yet supported\n");
        }
        out_map[opt] = val;
    }
    return true;
}

bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
    auto ctx_arg = common_params_parser_init(params, ex, print_usage);
    const common_params params_org = ctx_arg.params; // the example can modify the default params

    try {
        if (!common_params_parse_ex(argc, argv, ctx_arg)) {
            ctx_arg.params = params_org;
            return false;
        }
        if (ctx_arg.params.usage) {
            common_params_print_usage(ctx_arg);
            if (ctx_arg.print_usage) {
                ctx_arg.print_usage(argc, argv);
            }
            exit(0);
        }
        if (ctx_arg.params.completion) {
            common_params_print_completion(ctx_arg);
            exit(0);
        }
        params.lr.init();
    } catch (const std::invalid_argument & ex) {
        fprintf(stderr, "%s\n", ex.what());
        ctx_arg.params = params_org;
        return false;
    } catch (std::exception & ex) {
        fprintf(stderr, "%s\n", ex.what());
        exit(1); // for other exceptions, we exit with status code 1
    }

    return true;
}
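
// Illustrative caller (a hypothetical main(), not part of this file):
//
//   int main(int argc, char ** argv) {
//       common_params params;
//       if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, nullptr)) {
//           return 1; // a usage/error message has already been printed to stderr
//       }
//       // ... use params ...
//   }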

static std::string list_builtin_chat_templates() {
    std::vector<const char *> supported_tmpl;
    int32_t res = llama_chat_builtin_templates(nullptr, 0);
    supported_tmpl.resize(res);
    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
    std::ostringstream msg;
    for (auto & tmpl : supported_tmpl) {
        msg << tmpl << (&tmpl == &supported_tmpl.back() ? "" : ", ");
    }
    return msg.str();
}

bool common_arg_utils::is_truthy(const std::string & value) {
    return value == "on" || value == "enabled" || value == "1";
}

bool common_arg_utils::is_falsey(const std::string & value) {
    return value == "off" || value == "disabled" || value == "0";
}

bool common_arg_utils::is_autoy(const std::string & value) {
    return value == "auto" || value == "-1";
}
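
// These helpers back the tri-state "[on|off|auto]" flags below (e.g. --color, --flash-attn):
//
//   is_truthy("on") == true;   is_falsey("0") == true;   is_autoy("auto") == true;
//
// Any other string falls through to the flag's own error handling.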
  681. common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
  682. params.use_color = tty_can_use_colors();
  683. // load dynamic backends
  684. ggml_backend_load_all();
  685. common_params_context ctx_arg(params);
  686. ctx_arg.print_usage = print_usage;
  687. ctx_arg.ex = ex;
  688. std::string sampler_type_chars;
  689. std::string sampler_type_names;
  690. for (const auto & sampler : params.sampling.samplers) {
  691. sampler_type_chars += common_sampler_type_to_chr(sampler);
  692. sampler_type_names += common_sampler_type_to_str(sampler) + ";";
  693. }
  694. sampler_type_names.pop_back();
  695. /**
  696. * filter options by example
  697. * rules:
  698. * - all examples inherit options from LLAMA_EXAMPLE_COMMON
  699. * - if LLAMA_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example
  700. * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
  701. */
  702. auto add_opt = [&](common_arg arg) {
  703. if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
  704. ctx_arg.options.push_back(std::move(arg));
  705. }
  706. };
  707. add_opt(common_arg(
  708. {"-h", "--help", "--usage"},
  709. "print usage and exit",
  710. [](common_params & params) {
  711. params.usage = true;
  712. }
  713. ));
  714. add_opt(common_arg(
  715. {"--version"},
  716. "show version and build info",
  717. [](common_params &) {
  718. fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
  719. fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
  720. exit(0);
  721. }
  722. ));
  723. add_opt(common_arg(
  724. {"-cl", "--cache-list"},
  725. "show list of models in cache",
  726. [](common_params &) {
  727. printf("model cache directory: %s\n", fs_get_cache_directory().c_str());
  728. auto models = common_list_cached_models();
  729. printf("number of models in cache: %zu\n", models.size());
  730. for (size_t i = 0; i < models.size(); i++) {
  731. auto & model = models[i];
  732. printf("%4d. %s\n", (int) i + 1, model.to_string().c_str());
  733. }
  734. exit(0);
  735. }
  736. ));
  737. add_opt(common_arg(
  738. {"--completion-bash"},
  739. "print source-able bash completion script for llama.cpp",
  740. [](common_params & params) {
  741. params.completion = true;
  742. }
  743. ));
  744. add_opt(common_arg(
  745. {"--verbose-prompt"},
  746. string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),
  747. [](common_params & params) {
  748. params.verbose_prompt = true;
  749. }
  750. ));
  751. add_opt(common_arg(
  752. {"--no-display-prompt"},
  753. string_format("don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false"),
  754. [](common_params & params) {
  755. params.display_prompt = false;
  756. }
  757. ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI}));
  758. add_opt(common_arg(
  759. {"-co", "--color"}, "[on|off|auto]",
  760. "Colorize output to distinguish prompt and user input from generations ('on', 'off', or 'auto', default: 'auto')\n"
  761. "'auto' enables colors when output is to a terminal",
  762. [](common_params & params, const std::string & value) {
  763. if (is_truthy(value)) {
  764. params.use_color = true;
  765. } else if (is_falsey(value)) {
  766. params.use_color = false;
  767. } else if (is_autoy(value)) {
  768. params.use_color = tty_can_use_colors();
  769. } else {
  770. throw std::invalid_argument(
  771. string_format("error: unknown value for --color: '%s'\n", value.c_str()));
  772. }
  773. }
  774. ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
  775. add_opt(common_arg(
  776. {"-t", "--threads"}, "N",
  777. string_format("number of CPU threads to use during generation (default: %d)", params.cpuparams.n_threads),
  778. [](common_params & params, int value) {
  779. params.cpuparams.n_threads = value;
  780. if (params.cpuparams.n_threads <= 0) {
  781. params.cpuparams.n_threads = std::thread::hardware_concurrency();
  782. }
  783. }
  784. ).set_env("LLAMA_ARG_THREADS"));
  785. add_opt(common_arg(
  786. {"-tb", "--threads-batch"}, "N",
  787. "number of threads to use during batch and prompt processing (default: same as --threads)",
  788. [](common_params & params, int value) {
  789. params.cpuparams_batch.n_threads = value;
  790. if (params.cpuparams_batch.n_threads <= 0) {
  791. params.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
  792. }
  793. }
  794. ));
  795. add_opt(common_arg(
  796. {"-C", "--cpu-mask"}, "M",
  797. "CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: \"\")",
  798. [](common_params & params, const std::string & mask) {
  799. params.cpuparams.mask_valid = true;
  800. if (!parse_cpu_mask(mask, params.cpuparams.cpumask)) {
  801. throw std::invalid_argument("invalid cpumask");
  802. }
  803. }
  804. ));
  805. add_opt(common_arg(
  806. {"-Cr", "--cpu-range"}, "lo-hi",
  807. "range of CPUs for affinity. Complements --cpu-mask",
  808. [](common_params & params, const std::string & range) {
  809. params.cpuparams.mask_valid = true;
  810. if (!parse_cpu_range(range, params.cpuparams.cpumask)) {
  811. throw std::invalid_argument("invalid range");
  812. }
  813. }
  814. ));
  815. add_opt(common_arg(
  816. {"--cpu-strict"}, "<0|1>",
  817. string_format("use strict CPU placement (default: %u)\n", (unsigned) params.cpuparams.strict_cpu),
  818. [](common_params & params, const std::string & value) {
  819. params.cpuparams.strict_cpu = std::stoul(value);
  820. }
  821. ));
  822. add_opt(common_arg(
  823. {"--prio"}, "N",
  824. string_format("set process/thread priority : low(-1), normal(0), medium(1), high(2), realtime(3) (default: %d)\n", params.cpuparams.priority),
  825. [](common_params & params, int prio) {
  826. if (prio < GGML_SCHED_PRIO_LOW || prio > GGML_SCHED_PRIO_REALTIME) {
  827. throw std::invalid_argument("invalid value");
  828. }
  829. params.cpuparams.priority = (enum ggml_sched_priority) prio;
  830. }
  831. ));
  832. add_opt(common_arg(
  833. {"--poll"}, "<0...100>",
  834. string_format("use polling level to wait for work (0 - no polling, default: %u)\n", (unsigned) params.cpuparams.poll),
  835. [](common_params & params, const std::string & value) {
  836. params.cpuparams.poll = std::stoul(value);
  837. }
  838. ));
  839. add_opt(common_arg(
  840. {"-Cb", "--cpu-mask-batch"}, "M",
  841. "CPU affinity mask: arbitrarily long hex. Complements cpu-range-batch (default: same as --cpu-mask)",
  842. [](common_params & params, const std::string & mask) {
  843. params.cpuparams_batch.mask_valid = true;
  844. if (!parse_cpu_mask(mask, params.cpuparams_batch.cpumask)) {
  845. throw std::invalid_argument("invalid cpumask");
  846. }
  847. }
  848. ));
  849. add_opt(common_arg(
  850. {"-Crb", "--cpu-range-batch"}, "lo-hi",
  851. "ranges of CPUs for affinity. Complements --cpu-mask-batch",
  852. [](common_params & params, const std::string & range) {
  853. params.cpuparams_batch.mask_valid = true;
  854. if (!parse_cpu_range(range, params.cpuparams_batch.cpumask)) {
  855. throw std::invalid_argument("invalid range");
  856. }
  857. }
  858. ));
  859. add_opt(common_arg(
  860. {"--cpu-strict-batch"}, "<0|1>",
  861. "use strict CPU placement (default: same as --cpu-strict)",
  862. [](common_params & params, int value) {
  863. params.cpuparams_batch.strict_cpu = value;
  864. }
  865. ));
  866. add_opt(common_arg(
  867. {"--prio-batch"}, "N",
  868. string_format("set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams_batch.priority),
  869. [](common_params & params, int prio) {
  870. if (prio < 0 || prio > 3) {
  871. throw std::invalid_argument("invalid value");
  872. }
  873. params.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
  874. }
  875. ));
  876. add_opt(common_arg(
  877. {"--poll-batch"}, "<0|1>",
  878. "use polling to wait for work (default: same as --poll)",
  879. [](common_params & params, int value) {
  880. params.cpuparams_batch.poll = value;
  881. }
  882. ));
  883. add_opt(common_arg(
  884. {"-lcs", "--lookup-cache-static"}, "FNAME",
  885. "path to static lookup cache to use for lookup decoding (not updated by generation)",
  886. [](common_params & params, const std::string & value) {
  887. params.lookup_cache_static = value;
  888. }
  889. ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
  890. add_opt(common_arg(
  891. {"-lcd", "--lookup-cache-dynamic"}, "FNAME",
  892. "path to dynamic lookup cache to use for lookup decoding (updated by generation)",
  893. [](common_params & params, const std::string & value) {
  894. params.lookup_cache_dynamic = value;
  895. }
  896. ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
  897. add_opt(common_arg(
  898. {"-c", "--ctx-size"}, "N",
  899. string_format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx),
  900. [](common_params & params, int value) {
  901. params.n_ctx = value;
  902. }
  903. ).set_env("LLAMA_ARG_CTX_SIZE"));
  904. add_opt(common_arg(
  905. {"-n", "--predict", "--n-predict"}, "N",
  906. string_format(
  907. ex == LLAMA_EXAMPLE_COMPLETION
  908. ? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
  909. : "number of tokens to predict (default: %d, -1 = infinity)",
  910. params.n_predict),
  911. [](common_params & params, int value) {
  912. params.n_predict = value;
  913. }
  914. ).set_env("LLAMA_ARG_N_PREDICT"));
  915. add_opt(common_arg(
  916. {"-b", "--batch-size"}, "N",
  917. string_format("logical maximum batch size (default: %d)", params.n_batch),
  918. [](common_params & params, int value) {
  919. params.n_batch = value;
  920. }
  921. ).set_env("LLAMA_ARG_BATCH"));
  922. add_opt(common_arg(
  923. {"-ub", "--ubatch-size"}, "N",
  924. string_format("physical maximum batch size (default: %d)", params.n_ubatch),
  925. [](common_params & params, int value) {
  926. params.n_ubatch = value;
  927. }
  928. ).set_env("LLAMA_ARG_UBATCH"));
  929. add_opt(common_arg(
  930. {"--keep"}, "N",
  931. string_format("number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep),
  932. [](common_params & params, int value) {
  933. params.n_keep = value;
  934. }
  935. ));
  936. add_opt(common_arg(
  937. {"--swa-full"},
  938. string_format("use full-size SWA cache (default: %s)\n"
  939. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)", params.swa_full ? "true" : "false"),
  940. [](common_params & params) {
  941. params.swa_full = true;
  942. }
  943. ).set_env("LLAMA_ARG_SWA_FULL"));
  944. add_opt(common_arg(
  945. {"--ctx-checkpoints", "--swa-checkpoints"}, "N",
  946. string_format("max number of context checkpoints to create per slot (default: %d)\n"
  947. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/15293)", params.n_ctx_checkpoints),
  948. [](common_params & params, int value) {
  949. params.n_ctx_checkpoints = value;
  950. }
  951. ).set_env("LLAMA_ARG_CTX_CHECKPOINTS").set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
  952. add_opt(common_arg(
  953. {"--cache-ram", "-cram"}, "N",
  954. string_format("set the maximum cache size in MiB (default: %d, -1 - no limit, 0 - disable)\n"
  955. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/16391)", params.cache_ram_mib),
  956. [](common_params & params, int value) {
  957. params.cache_ram_mib = value;
  958. }
  959. ).set_env("LLAMA_ARG_CACHE_RAM").set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
  960. add_opt(common_arg(
  961. {"--kv-unified", "-kvu"},
  962. string_format("use single unified KV buffer for the KV cache of all sequences (default: %s)\n"
  963. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/14363)", params.kv_unified ? "true" : "false"),
  964. [](common_params & params) {
  965. params.kv_unified = true;
  966. }
  967. ).set_env("LLAMA_ARG_KV_UNIFIED"));
  968. add_opt(common_arg(
  969. {"--no-context-shift"},
  970. string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
  971. [](common_params & params) {
  972. params.ctx_shift = false;
  973. }
  974. ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
  975. add_opt(common_arg(
  976. {"--context-shift"},
  977. string_format("enables context shift on infinite text generation (default: %s)", params.ctx_shift ? "enabled" : "disabled"),
  978. [](common_params & params) {
  979. params.ctx_shift = true;
  980. }
  981. ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_CONTEXT_SHIFT"));
  982. add_opt(common_arg(
  983. {"--chunks"}, "N",
  984. string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks),
  985. [](common_params & params, int value) {
  986. params.n_chunks = value;
  987. }
  988. ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
  989. add_opt(common_arg({ "-fa", "--flash-attn" }, "[on|off|auto]",
  990. string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')",
  991. llama_flash_attn_type_name(params.flash_attn_type)),
  992. [](common_params & params, const std::string & value) {
  993. if (is_truthy(value)) {
  994. params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
  995. } else if (is_falsey(value)) {
  996. params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
  997. } else if (is_autoy(value)) {
  998. params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
  999. } else {
  1000. throw std::runtime_error(
  1001. string_format("error: unknown value for --flash-attn: '%s'\n", value.c_str()));
  1002. }
  1003. }).set_env("LLAMA_ARG_FLASH_ATTN"));
  1004. add_opt(common_arg(
  1005. {"-p", "--prompt"}, "PROMPT",
  1006. "prompt to start generation with; for system message, use -sys",
  1007. [](common_params & params, const std::string & value) {
  1008. params.prompt = value;
  1009. }
  1010. ).set_excludes({LLAMA_EXAMPLE_SERVER}));
  1011. add_opt(common_arg(
  1012. {"-sys", "--system-prompt"}, "PROMPT",
  1013. "system prompt to use with model (if applicable, depending on chat template)",
  1014. [](common_params & params, const std::string & value) {
  1015. params.system_prompt = value;
  1016. }
  1017. ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_DIFFUSION}));
  1018. add_opt(common_arg(
  1019. {"--no-perf"},
  1020. string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
  1021. [](common_params & params) {
  1022. params.no_perf = true;
  1023. params.sampling.no_perf = true;
  1024. }
  1025. ).set_env("LLAMA_ARG_NO_PERF"));
  1026. add_opt(common_arg(
  1027. {"--no-show-timings"},
  1028. string_format("disable timing information after each response (default: %s)", params.show_timings ? "true" : "false"),
  1029. [](common_params & params) {
  1030. params.show_timings = false;
  1031. }
  1032. ).set_examples({LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_NO_SHOW_TIMINGS"));
  1033. add_opt(common_arg(
  1034. {"-f", "--file"}, "FNAME",
  1035. "a file containing the prompt (default: none)",
  1036. [](common_params & params, const std::string & value) {
  1037. params.prompt = read_file(value);
  1038. // store the external file name in params
  1039. params.prompt_file = value;
  1040. if (!params.prompt.empty() && params.prompt.back() == '\n') {
  1041. params.prompt.pop_back();
  1042. }
  1043. }
  1044. ).set_excludes({LLAMA_EXAMPLE_SERVER}));
  1045. add_opt(common_arg(
  1046. {"-sysf", "--system-prompt-file"}, "FNAME",
  1047. "a file containing the system prompt (default: none)",
  1048. [](common_params & params, const std::string & value) {
  1049. params.system_prompt = read_file(value);
  1050. if (!params.system_prompt.empty() && params.system_prompt.back() == '\n') {
  1051. params.system_prompt.pop_back();
  1052. }
  1053. }
  1054. ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_DIFFUSION}));
  1055. add_opt(common_arg(
  1056. {"--in-file"}, "FNAME",
  1057. "an input file (repeat to specify multiple files)",
  1058. [](common_params & params, const std::string & value) {
  1059. std::ifstream file(value);
  1060. if (!file) {
  1061. throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
  1062. }
  1063. params.in_files.push_back(value);
  1064. }
  1065. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  1066. add_opt(common_arg(
  1067. {"-bf", "--binary-file"}, "FNAME",
  1068. "binary file containing the prompt (default: none)",
  1069. [](common_params & params, const std::string & value) {
  1070. std::ifstream file(value, std::ios::binary);
  1071. if (!file) {
  1072. throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
  1073. }
  1074. // store the external file name in params
  1075. params.prompt_file = value;
  1076. std::ostringstream ss;
  1077. ss << file.rdbuf();
  1078. params.prompt = ss.str();
  1079. fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
  1080. }
  1081. ).set_excludes({LLAMA_EXAMPLE_SERVER}));
  1082. add_opt(common_arg(
  1083. {"-e", "--escape"},
  1084. string_format("process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
  1085. [](common_params & params) {
  1086. params.escape = true;
  1087. }
  1088. ));
  1089. add_opt(common_arg(
  1090. {"--no-escape"},
  1091. "do not process escape sequences",
  1092. [](common_params & params) {
  1093. params.escape = false;
  1094. }
  1095. ));
    add_opt(common_arg(
        {"-ptc", "--print-token-count"}, "N",
        string_format("print token count every N tokens (default: %d)", params.n_print),
        [](common_params & params, int value) {
            params.n_print = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"--prompt-cache"}, "FNAME",
        "file to cache prompt state for faster startup (default: none)",
        [](common_params & params, const std::string & value) {
            params.path_prompt_cache = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"--prompt-cache-all"},
        "if specified, saves user input and generations to cache as well\n",
        [](common_params & params) {
            params.prompt_cache_all = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"--prompt-cache-ro"},
        "if specified, uses the prompt cache but does not update it",
        [](common_params & params) {
            params.prompt_cache_ro = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"-r", "--reverse-prompt"}, "PROMPT",
        "halt generation at PROMPT, return control in interactive mode\n",
        [](common_params & params, const std::string & value) {
            params.antiprompt.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-sp", "--special"},
        string_format("special tokens output enabled (default: %s)", params.special ? "true" : "false"),
        [](common_params & params) {
            params.special = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-cnv", "--conversation"},
        "run in conversation mode:\n"
        "- does not print special tokens and suffix/prefix\n"
        "- interactive mode is also enabled\n"
        "(default: auto enabled if chat template is available)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"-no-cnv", "--no-conversation"},
        "force disable conversation mode (default: false)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"-st", "--single-turn"},
        "run conversation for a single turn only, then exit when done\n"
        "will not be interactive if first turn is predefined with --prompt\n"
        "(default: false)",
        [](common_params & params) {
            params.single_turn = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"-i", "--interactive"},
        string_format("run in interactive mode (default: %s)", params.interactive ? "true" : "false"),
        [](common_params & params) {
            params.interactive = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"-if", "--interactive-first"},
        string_format("run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false"),
        [](common_params & params) {
            params.interactive_first = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"-mli", "--multiline-input"},
        "allows you to write or paste multiple lines without ending each in '\\'",
        [](common_params & params) {
            params.multiline_input = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"--in-prefix-bos"},
        "prefix BOS to user inputs, preceding the `--in-prefix` string",
        [](common_params & params) {
            params.input_prefix_bos = true;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"--in-prefix"}, "STRING",
        "string to prefix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_prefix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"--in-suffix"}, "STRING",
        "string to suffix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_suffix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"--no-warmup"},
        "skip warming up the model with an empty run",
        [](common_params & params) {
            params.warmup = false;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD, LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--spm-infill"},
        string_format(
            "use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: %s)",
            params.spm_infill ? "enabled" : "disabled"
        ),
        [](common_params & params) {
            params.spm_infill = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
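    // sampling parameters (registered via set_sparam()); many of these also record
    // which settings the user set explicitly in params.sampling.user_sampling_config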
    add_opt(common_arg(
        {"--samplers"}, "SAMPLERS",
        string_format("samplers that will be used for generation, in order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
        [](common_params & params, const std::string & value) {
            const auto sampler_names = string_split<std::string>(value, ';');
            params.sampling.samplers = common_sampler_types_from_names(sampler_names, true);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_SAMPLERS;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-s", "--seed"}, "SEED",
        string_format("RNG seed (default: %d, use random seed for %d)", params.sampling.seed, LLAMA_DEFAULT_SEED),
        [](common_params & params, const std::string & value) {
            params.sampling.seed = std::stoul(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--sampling-seq", "--sampler-seq"}, "SEQUENCE",
        string_format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()),
        [](common_params & params, const std::string & value) {
            params.sampling.samplers = common_sampler_types_from_chars(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--ignore-eos"},
        "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)",
        [](common_params & params) {
            params.sampling.ignore_eos = true;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--temp"}, "N",
        string_format("temperature (default: %.1f)", (double)params.sampling.temp),
        [](common_params & params, const std::string & value) {
            params.sampling.temp = std::stof(value);
            params.sampling.temp = std::max(params.sampling.temp, 0.0f);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TEMP;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-k"}, "N",
        string_format("top-k sampling (default: %d, 0 = disabled)", params.sampling.top_k),
        [](common_params & params, int value) {
            params.sampling.top_k = value;
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TOP_K;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-p"}, "N",
        string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sampling.top_p),
        [](common_params & params, const std::string & value) {
            params.sampling.top_p = std::stof(value);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TOP_P;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--min-p"}, "N",
        string_format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sampling.min_p),
        [](common_params & params, const std::string & value) {
            params.sampling.min_p = std::stof(value);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIN_P;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-nsigma"}, "N",
        string_format("top-n-sigma sampling (default: %.1f, -1.0 = disabled)", params.sampling.top_n_sigma),
        [](common_params & params, const std::string & value) {
            params.sampling.top_n_sigma = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--xtc-probability"}, "N",
        string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sampling.xtc_probability),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_probability = std::stof(value);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_XTC_PROBABILITY;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--xtc-threshold"}, "N",
        string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sampling.xtc_threshold),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_threshold = std::stof(value);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_XTC_THRESHOLD;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--typical"}, "N",
        string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sampling.typ_p),
        [](common_params & params, const std::string & value) {
            params.sampling.typ_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-last-n"}, "N",
        string_format("last n tokens to consider for penalization (default: %d, 0 = disabled, -1 = ctx_size)", params.sampling.penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid repeat-last-n = %d\n", value));
            }
            params.sampling.penalty_last_n = value;
            params.sampling.n_prev = std::max(params.sampling.n_prev, params.sampling.penalty_last_n);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_LAST_N;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-penalty"}, "N",
        string_format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sampling.penalty_repeat),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_repeat = std::stof(value);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_REPEAT;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--presence-penalty"}, "N",
        string_format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_present),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_present = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--frequency-penalty"}, "N",
        string_format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_freq),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_freq = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-multiplier"}, "N",
        string_format("set DRY sampling multiplier (default: %.1f, 0.0 = disabled)", (double)params.sampling.dry_multiplier),
        [](common_params & params, const std::string & value) {
            params.sampling.dry_multiplier = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-base"}, "N",
        string_format("set DRY sampling base value (default: %.2f)", (double)params.sampling.dry_base),
        [](common_params & params, const std::string & value) {
            float potential_base = std::stof(value);
            // silently ignore invalid base values (< 1.0)
            if (potential_base >= 1.0f) {
                params.sampling.dry_base = potential_base;
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-allowed-length"}, "N",
        string_format("set allowed length for DRY sampling (default: %d)", params.sampling.dry_allowed_length),
        [](common_params & params, int value) {
            params.sampling.dry_allowed_length = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-penalty-last-n"}, "N",
        string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sampling.dry_penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid dry-penalty-last-n = %d\n", value));
            }
            params.sampling.dry_penalty_last_n = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-sequence-breaker"}, "STRING",
        string_format("add sequence breaker for DRY sampling, clearing out default breakers (%s) in the process; use \"none\" to not use any sequence breakers\n",
            params.sampling.dry_sequence_breakers.empty() ? "none" :
            std::accumulate(std::next(params.sampling.dry_sequence_breakers.begin()),
                params.sampling.dry_sequence_breakers.end(),
                std::string("'") + (params.sampling.dry_sequence_breakers[0] == "\n" ? "\\n" : params.sampling.dry_sequence_breakers[0]) + "'",
                [](const std::string & a, const std::string & b) {
                    std::string formatted_b = (b == "\n") ? "\\n" : b;
                    return a + ", '" + formatted_b + "'";
                }).c_str()),
        [](common_params & params, const std::string & value) {
            // the default breakers are cleared only once, on the first occurrence of this argument
            static bool defaults_cleared = false;
            if (!defaults_cleared) {
                params.sampling.dry_sequence_breakers.clear();
                defaults_cleared = true;
            }
            if (value == "none") {
                params.sampling.dry_sequence_breakers.clear();
            } else {
                params.sampling.dry_sequence_breakers.emplace_back(value);
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dynatemp-range"}, "N",
        string_format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sampling.dynatemp_range),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_range = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dynatemp-exp"}, "N",
        string_format("dynamic temperature exponent (default: %.1f)", (double)params.sampling.dynatemp_exponent),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_exponent = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat"}, "N",
        string_format("use Mirostat sampling.\nTop K, Nucleus and Locally Typical samplers are ignored if used.\n"
            "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sampling.mirostat),
        [](common_params & params, int value) {
            params.sampling.mirostat = value;
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-lr"}, "N",
        string_format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sampling.mirostat_eta),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_eta = std::stof(value);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_ETA;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-ent"}, "N",
        string_format("Mirostat target entropy, parameter tau (default: %.1f)", (double)params.sampling.mirostat_tau),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_tau = std::stof(value);
            params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_TAU;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-l", "--logit-bias"}, "TOKEN_ID(+/-)BIAS",
        "modifies the likelihood of token appearing in the completion,\n"
        "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
        "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'",
        [](common_params & params, const std::string & value) {
            // parse "TOKEN_ID(+/-)BIAS", e.g. "15043+1" or "15043-1"
            std::stringstream ss(value);
            llama_token key;
            char sign;
            std::string value_str;
            try {
                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
                    const float bias = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
                    params.sampling.logit_bias.push_back({key, bias});
                } else {
                    throw std::invalid_argument("invalid input format");
                }
            } catch (const std::exception &) {
                throw std::invalid_argument("invalid input format");
            }
        }
    ).set_sparam());
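    // grammar- and JSON-schema-constrained generation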
    add_opt(common_arg(
        {"--grammar"}, "GRAMMAR",
        string_format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sampling.grammar.c_str()),
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--grammar-file"}, "FNAME",
        "file to read grammar from",
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = read_file(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-j", "--json-schema"}, "SCHEMA",
        "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead",
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = json_schema_to_grammar(json::parse(value));
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-jf", "--json-schema-file"}, "FILE",
        "File containing a JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string schema;
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(schema)
            );
            params.sampling.grammar = json_schema_to_grammar(json::parse(schema));
        }
    ).set_sparam());
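    // embedding pooling and attention type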
    add_opt(common_arg(
        {"--pooling"}, "{none,mean,cls,last,rank}",
        "pooling type for embeddings, use model default if unspecified",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
            else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
            else if (value == "cls")  { params.pooling_type = LLAMA_POOLING_TYPE_CLS;  }
            else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; }
            else if (value == "rank") { params.pooling_type = LLAMA_POOLING_TYPE_RANK; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_POOLING"));
    add_opt(common_arg(
        {"--attention"}, "{causal,non-causal}",
        "attention type for embeddings, use model default if unspecified",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "causal")     { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
            else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
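    // RoPE frequency scaling and YaRN context extension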
    add_opt(common_arg(
        {"--rope-scaling"}, "{none,linear,yarn}",
        "RoPE frequency scaling method, defaults to linear unless specified by the model",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "none")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
            else if (value == "yarn")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_ROPE_SCALING_TYPE"));
    add_opt(common_arg(
        {"--rope-scale"}, "N",
        "RoPE context scaling factor, expands context by a factor of N",
        [](common_params & params, const std::string & value) {
            params.rope_freq_scale = 1.0f / std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_SCALE"));
    add_opt(common_arg(
        {"--rope-freq-base"}, "N",
        "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)",
        [](common_params & params, const std::string & value) {
            params.rope_freq_base = std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_FREQ_BASE"));
    add_opt(common_arg(
        {"--rope-freq-scale"}, "N",
        "RoPE frequency scaling factor, expands context by a factor of 1/N",
        [](common_params & params, const std::string & value) {
            params.rope_freq_scale = std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_FREQ_SCALE"));
    add_opt(common_arg(
        {"--yarn-orig-ctx"}, "N",
        string_format("YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx),
        [](common_params & params, int value) {
            params.yarn_orig_ctx = value;
        }
    ).set_env("LLAMA_ARG_YARN_ORIG_CTX"));
    add_opt(common_arg(
        {"--yarn-ext-factor"}, "N",
        string_format("YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_ext_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_EXT_FACTOR"));
    add_opt(common_arg(
        {"--yarn-attn-factor"}, "N",
        string_format("YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_attn_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_ATTN_FACTOR"));
    add_opt(common_arg(
        {"--yarn-beta-slow"}, "N",
        string_format("YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_slow = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_SLOW"));
    add_opt(common_arg(
        {"--yarn-beta-fast"}, "N",
        string_format("YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_fast = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_FAST"));
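    // group attention and KV cache behavior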
    add_opt(common_arg(
        {"-gan", "--grp-attn-n"}, "N",
        string_format("group-attention factor (default: %d)", params.grp_attn_n),
        [](common_params & params, int value) {
            params.grp_attn_n = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_N").set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"-gaw", "--grp-attn-w"}, "N",
        string_format("group-attention width (default: %d)", params.grp_attn_w),
        [](common_params & params, int value) {
            params.grp_attn_w = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_W").set_examples({LLAMA_EXAMPLE_COMPLETION}));
    add_opt(common_arg(
        {"-nkvo", "--no-kv-offload"},
        "disable KV offload",
        [](common_params & params) {
            params.no_kv_offload = true;
        }
    ).set_env("LLAMA_ARG_NO_KV_OFFLOAD"));
    add_opt(common_arg(
        {"-nr", "--no-repack"},
        "disable weight repacking",
        [](common_params & params) {
            params.no_extra_bufts = true;
        }
    ).set_env("LLAMA_ARG_NO_REPACK"));
    add_opt(common_arg(
        {"--no-host"},
        "bypass host buffer, allowing extra buffers to be used",
        [](common_params & params) {
            params.no_host = true;
        }
    ).set_env("LLAMA_ARG_NO_HOST"));
    add_opt(common_arg(
        {"-ctk", "--cache-type-k"}, "TYPE",
        string_format(
            "KV cache data type for K\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_k)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_k = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_K"));
    add_opt(common_arg(
        {"-ctv", "--cache-type-v"}, "TYPE",
        string_format(
            "KV cache data type for V\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_v)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_v = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_V"));
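    // evaluation options (perplexity, HellaSwag, Winogrande, multiple choice, KL-divergence)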
    add_opt(common_arg(
        {"--hellaswag"},
        "compute HellaSwag score over random tasks from the datafile supplied with -f",
        [](common_params & params) {
            params.hellaswag = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--hellaswag-tasks"}, "N",
        string_format("number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks),
        [](common_params & params, int value) {
            params.hellaswag_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande"},
        "compute Winogrande score over random tasks from the datafile supplied with -f",
        [](common_params & params) {
            params.winogrande = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande-tasks"}, "N",
        string_format("number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks),
        [](common_params & params, int value) {
            params.winogrande_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice"},
        "compute multiple choice score over random tasks from the datafile supplied with -f",
        [](common_params & params) {
            params.multiple_choice = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice-tasks"}, "N",
        string_format("number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks),
        [](common_params & params, int value) {
            params.multiple_choice_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--kl-divergence"},
        "computes KL-divergence to logits provided via --kl-divergence-base",
        [](common_params & params) {
            params.kl_divergence = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--save-all-logits", "--kl-divergence-base"}, "FNAME",
        "set logits file",
        [](common_params & params, const std::string & value) {
            params.logits_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-stride"}, "N",
        string_format("stride for perplexity calculation (default: %d)", params.ppl_stride),
        [](common_params & params, int value) {
            params.ppl_stride = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-output-type"}, "<0|1>",
        string_format("output type for perplexity calculation (default: %d)", params.ppl_output_type),
        [](common_params & params, int value) {
            params.ppl_output_type = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"-dt", "--defrag-thold"}, "N",
        string_format("KV cache defragmentation threshold (DEPRECATED)"),
        [](common_params & params, const std::string & value) {
            GGML_UNUSED(params);
            GGML_UNUSED(value);
            LOG_WRN("DEPRECATED: --defrag-thold is deprecated and no longer necessary to specify\n");
        }
    ).set_env("LLAMA_ARG_DEFRAG_THOLD"));
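    // parallel decoding and batching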
    add_opt(common_arg(
        {"-np", "--parallel"}, "N",
        string_format("number of parallel sequences to decode (default: %d)", params.n_parallel),
        [](common_params & params, int value) {
            params.n_parallel = value;
        }
    ).set_env("LLAMA_ARG_N_PARALLEL"));
    add_opt(common_arg(
        {"-ns", "--sequences"}, "N",
        string_format("number of sequences to decode (default: %d)", params.n_sequences),
        [](common_params & params, int value) {
            params.n_sequences = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-cb", "--cont-batching"},
        string_format("enable continuous batching (a.k.a. dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled"),
        [](common_params & params) {
            params.cont_batching = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CONT_BATCHING"));
    add_opt(common_arg(
        {"-nocb", "--no-cont-batching"},
        "disable continuous batching",
        [](common_params & params) {
            params.cont_batching = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
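    // multimodal (mtmd) options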
    add_opt(common_arg(
        {"--mmproj"}, "FILE",
        "path to a multimodal projector file. see tools/mtmd/README.md\n"
        "note: if -hf is used, this argument can be omitted",
        [](common_params & params, const std::string & value) {
            params.mmproj.path = value;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_MMPROJ"));
    add_opt(common_arg(
        {"--mmproj-url"}, "URL",
        "URL to a multimodal projector file. see tools/mtmd/README.md",
        [](common_params & params, const std::string & value) {
            params.mmproj.url = value;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_MMPROJ_URL"));
    add_opt(common_arg(
        {"--no-mmproj"},
        "explicitly disable multimodal projector, useful when using -hf",
        [](common_params & params) {
            params.no_mmproj = true;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ"));
    add_opt(common_arg(
        {"--no-mmproj-offload"},
        "do not offload multimodal projector to GPU",
        [](common_params & params) {
            params.mmproj_use_gpu = false;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD"));
    add_opt(common_arg(
        {"--image", "--audio"}, "FILE",
        "path to an image or audio file. use with multimodal models, can be repeated if you have multiple files\n",
        [](common_params & params, const std::string & value) {
            params.image.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MTMD, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"--image-min-tokens"}, "N",
        "minimum number of tokens each image can take, only used by vision models with dynamic resolution (default: read from model)",
        [](common_params & params, int value) {
            params.image_min_tokens = value;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_IMAGE_MIN_TOKENS"));
    add_opt(common_arg(
        {"--image-max-tokens"}, "N",
        "maximum number of tokens each image can take, only used by vision models with dynamic resolution (default: read from model)",
        [](common_params & params, int value) {
            params.image_max_tokens = value;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_IMAGE_MAX_TOKENS"));
    // the RPC backend option is only registered when llama.cpp was built with RPC support
    if (llama_supports_rpc()) {
        add_opt(common_arg(
            {"--rpc"}, "SERVERS",
            "comma separated list of RPC servers",
            [](common_params & params, const std::string & value) {
                add_rpc_devices(value);
                GGML_UNUSED(params);
            }
        ).set_env("LLAMA_ARG_RPC"));
    }
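    // memory locking, mmap and NUMA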
    add_opt(common_arg(
        {"--mlock"},
        "force system to keep model in RAM rather than swapping or compressing",
        [](common_params & params) {
            params.use_mlock = true;
        }
    ).set_env("LLAMA_ARG_MLOCK"));
    add_opt(common_arg(
        {"--no-mmap"},
        "do not memory-map model (slower load but may reduce pageouts if not using mlock)",
        [](common_params & params) {
            params.use_mmap = false;
        }
    ).set_env("LLAMA_ARG_NO_MMAP"));
    add_opt(common_arg(
        {"--numa"}, "TYPE",
        "attempt optimizations that help on some NUMA systems\n"
        "- distribute: spread execution evenly over all nodes\n"
        "- isolate: only spawn threads on CPUs on the node that execution started on\n"
        "- numactl: use the CPU map provided by numactl\n"
        "if run without this previously, it is recommended to drop the system page cache before using this\n"
        "see https://github.com/ggml-org/llama.cpp/issues/1437",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_NUMA"));
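    // device selection and tensor/layer placement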
    add_opt(common_arg(
        {"-dev", "--device"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.devices = parse_device_list(value);
        }
    ).set_env("LLAMA_ARG_DEVICE"));
    add_opt(common_arg(
        {"--list-devices"},
        "print list of available devices and exit",
        [](common_params &) {
            // collect all non-CPU backend devices
            std::vector<ggml_backend_dev_t> devices;
            for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                auto * dev = ggml_backend_dev_get(i);
                if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
                    devices.push_back(dev);
                }
            }
            printf("Available devices:\n");
            for (auto * dev : devices) {
                size_t free, total;
                ggml_backend_dev_memory(dev, &free, &total);
                printf(" %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024);
            }
            exit(0);
        }
    ));
    add_opt(common_arg(
        {"--override-tensor", "-ot"}, "<tensor name pattern>=<buffer type>,...",
        "override tensor buffer type", [](common_params & params, const std::string & value) {
            parse_tensor_buffer_overrides(value, params.tensor_buft_overrides);
        }
    ));
    add_opt(common_arg(
        {"--override-tensor-draft", "-otd"}, "<tensor name pattern>=<buffer type>,...",
        "override tensor buffer type for draft model", [](common_params & params, const std::string & value) {
            parse_tensor_buffer_overrides(value, params.speculative.tensor_buft_overrides);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"--cpu-moe", "-cmoe"},
        "keep all Mixture of Experts (MoE) weights in the CPU",
        [](common_params & params) {
            params.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
        }
    ).set_env("LLAMA_ARG_CPU_MOE"));
    add_opt(common_arg(
        {"--n-cpu-moe", "-ncmoe"}, "N",
        "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU",
        [](common_params & params, int value) {
            if (value < 0) {
                throw std::invalid_argument("invalid value");
            }
            for (int i = 0; i < value; ++i) {
                // keep the pattern strings alive (and avoid leaking them) by storing them in a static list;
                // unlike a vector, a list never relocates its elements, so the c_str() pointers stay valid
                static std::list<std::string> buft_overrides;
                buft_overrides.push_back(llm_ffn_exps_block_regex(i));
                params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), ggml_backend_cpu_buffer_type()});
            }
        }
    ).set_env("LLAMA_ARG_N_CPU_MOE"));
    add_opt(common_arg(
        {"--cpu-moe-draft", "-cmoed"},
        "keep all Mixture of Experts (MoE) weights in the CPU for the draft model",
        [](common_params & params) {
            params.speculative.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_CPU_MOE_DRAFT"));
    add_opt(common_arg(
        {"--n-cpu-moe-draft", "-ncmoed"}, "N",
        "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU for the draft model",
        [](common_params & params, int value) {
            if (value < 0) {
                throw std::invalid_argument("invalid value");
            }
            for (int i = 0; i < value; ++i) {
                // same lifetime trick as in --n-cpu-moe
                static std::list<std::string> buft_overrides_draft;
                buft_overrides_draft.push_back(llm_ffn_exps_block_regex(i));
                params.speculative.tensor_buft_overrides.push_back({buft_overrides_draft.back().c_str(), ggml_backend_cpu_buffer_type()});
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_N_CPU_MOE_DRAFT"));
    add_opt(common_arg(
        {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
        string_format("max. number of layers to store in VRAM (default: %d)", params.n_gpu_layers),
        [](common_params & params, int value) {
            params.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
    add_opt(common_arg(
        {"-sm", "--split-mode"}, "{none,layer,row}",
        "how to split the model across multiple GPUs, one of:\n"
        "- none: use one GPU only\n"
        "- layer (default): split layers and KV across GPUs\n"
        "- row: split rows across GPUs",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            if (arg_next == "none") {
                params.split_mode = LLAMA_SPLIT_MODE_NONE;
            } else if (arg_next == "layer") {
                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
            } else if (arg_next == "row") {
                params.split_mode = LLAMA_SPLIT_MODE_ROW;
            } else {
                throw std::invalid_argument("invalid value");
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the split mode has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_SPLIT_MODE"));
    add_opt(common_arg(
        {"-ts", "--tensor-split"}, "N0,N1,N2,...",
        "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            // split string by , and /
            const std::regex regex{ R"([,/]+)" };
            std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
            std::vector<std::string> split_arg{ it, {} };
            if (split_arg.size() >= llama_max_devices()) {
                throw std::invalid_argument(
                    string_format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)llama_max_devices())
                );
            }
            for (size_t i = 0; i < llama_max_devices(); ++i) {
                if (i < split_arg.size()) {
                    params.tensor_split[i] = std::stof(split_arg[i]);
                } else {
                    params.tensor_split[i] = 0.0f;
                }
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting a tensor split has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_TENSOR_SPLIT"));
    add_opt(common_arg(
        {"-mg", "--main-gpu"}, "INDEX",
        string_format("the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu),
        [](common_params & params, int value) {
            params.main_gpu = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the main GPU has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_MAIN_GPU"));
    add_opt(common_arg(
        {"--check-tensors"},
        string_format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"),
        [](common_params & params) {
            params.check_tensors = true;
        }
    ));
    add_opt(common_arg(
        {"--override-kv"}, "KEY=TYPE:VALUE",
        "advanced option to override model metadata by key. may be specified multiple times.\n"
        "types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false",
        [](common_params & params, const std::string & value) {
            if (!string_parse_kv_override(value.c_str(), params.kv_overrides)) {
                throw std::runtime_error(string_format("error: Invalid type for KV override: %s\n", value.c_str()));
            }
        }
    ));
    add_opt(common_arg(
        {"--no-op-offload"},
        string_format("disable offloading host tensor operations to device (default: %s)", params.no_op_offload ? "true" : "false"),
        [](common_params & params) {
            params.no_op_offload = true;
        }
    ));
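    // adapters: LoRA and control vectors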
    add_opt(common_arg(
        {"--lora"}, "FNAME",
        "path to LoRA adapter (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & value) {
            params.lora_adapters.push_back({ std::string(value), 1.0, "", "", nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"--lora-scaled"}, "FNAME", "SCALE",
        "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.lora_adapters.push_back({ fname, std::stof(scale), "", "", nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"--control-vector"}, "FNAME",
        "add a control vector\nnote: this argument can be repeated to add multiple control vectors",
        [](common_params & params, const std::string & value) {
            params.control_vectors.push_back({ 1.0f, value, });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-scaled"}, "FNAME", "SCALE",
        "add a control vector with user defined scaling SCALE\n"
        "note: this argument can be repeated to add multiple scaled control vectors",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.control_vectors.push_back({ std::stof(scale), fname });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-layer-range"}, "START", "END",
        "layer range to apply the control vector(s) to, start and end inclusive",
        [](common_params & params, const std::string & start, const std::string & end) {
            params.control_vector_layer_start = std::stoi(start);
            params.control_vector_layer_end = std::stoi(end);
        }
    ));
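    // model selection and remote sources (local path, URL, Docker Hub, Hugging Face)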
    add_opt(common_arg(
        {"-a", "--alias"}, "STRING",
        "set alias for model name (to be used by REST API)",
        [](common_params & params, const std::string & value) {
            params.model_alias = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ALIAS"));
    add_opt(common_arg(
        {"-m", "--model"}, "FNAME",
        ex == LLAMA_EXAMPLE_EXPORT_LORA
            ? "model path from which to load base model"
            : "model path to load",
        [](common_params & params, const std::string & value) {
            params.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL"));
    add_opt(common_arg(
        {"-mu", "--model-url"}, "MODEL_URL",
        "model download url (default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.url = value;
        }
    ).set_env("LLAMA_ARG_MODEL_URL"));
    add_opt(common_arg(
        {"-dr", "--docker-repo"}, "[<repo>/]<model>[:quant]",
        "Docker Hub model repository. repo is optional, defaults to ai/. quant is optional, defaults to :latest.\n"
        "example: gemma3\n"
        "(default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.docker_repo = value;
        }
    ).set_env("LLAMA_ARG_DOCKER_REPO"));
    add_opt(common_arg(
        {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]",
        "Hugging Face model repository; quant is optional, case-insensitive, defaults to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.\n"
        "mmproj is also downloaded automatically if available. to disable, add --no-mmproj\n"
        "example: unsloth/phi-4-GGUF:q4_k_m\n"
        "(default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO"));
    add_opt(common_arg(
        {"-hfd", "-hfrd", "--hf-repo-draft"}, "<user>/<model>[:quant]",
        "Same as --hf-repo, but for the draft model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HFD_REPO"));
    add_opt(common_arg(
        {"-hff", "--hf-file"}, "FILE",
        "Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE"));
    add_opt(common_arg(
        {"-hfv", "-hfrv", "--hf-repo-v"}, "<user>/<model>[:quant]",
        "Hugging Face model repository for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO_V"));
    add_opt(common_arg(
        {"-hffv", "--hf-file-v"}, "FILE",
        "Hugging Face model file for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE_V"));
    add_opt(common_arg(
        {"-hft", "--hf-token"}, "TOKEN",
        "Hugging Face access token (default: value from HF_TOKEN environment variable)",
        [](common_params & params, const std::string & value) {
            params.hf_token = value;
        }
    ).set_env("HF_TOKEN"));
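    // retrieval and passkey test options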
    add_opt(common_arg(
        {"--context-file"}, "FNAME",
        "file to load context from (repeat to specify multiple files)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.context_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-size"}, "N",
        string_format("minimum length of embedded text chunks (default: %d)", params.chunk_size),
        [](common_params & params, int value) {
            params.chunk_size = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-separator"}, "STRING",
        string_format("separator between chunks (default: '%s')", params.chunk_separator.c_str()),
        [](common_params & params, const std::string & value) {
            params.chunk_separator = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--junk"}, "N",
        string_format("number of times to repeat the junk text (default: %d)", params.n_junk),
        [](common_params & params, int value) {
            params.n_junk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY, LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"--pos"}, "N",
        string_format("position of the passkey in the junk text (default: %d)", params.i_pos),
        [](common_params & params, int value) {
            params.i_pos = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"-o", "--output", "--output-file"}, "FNAME",
        string_format("output file (default: '%s')", params.out_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.out_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_FINETUNE}));
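    // imatrix options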
    add_opt(common_arg(
        {"-ofreq", "--output-frequency"}, "N",
        string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
        [](common_params & params, int value) {
            params.n_out_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--output-format"}, "{gguf,dat}",
        string_format("output format for imatrix file (default: %s)", params.imat_dat > 0 ? "dat" : "gguf"),
        [](common_params & params, const std::string & value) {
            /**/ if (value == "gguf") { params.imat_dat = -1; }
            else if (value == "dat")  { params.imat_dat = 1; }
            else { throw std::invalid_argument("invalid output format"); }
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--save-frequency"}, "N",
        string_format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq),
        [](common_params & params, int value) {
            params.n_save_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--process-output"},
        string_format("collect data for the output tensor (default: %s)", params.process_output ? "true" : "false"),
        [](common_params & params) {
            params.process_output = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--no-ppl"},
        string_format("do not compute perplexity (default: %s)", params.compute_ppl ? "true" : "false"),
        [](common_params & params) {
            params.compute_ppl = false;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--chunk", "--from-chunk"}, "N",
        string_format("start processing the input from chunk N (default: %d)", params.i_chunk),
        [](common_params & params, int value) {
            params.i_chunk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--show-statistics"},
        string_format("show imatrix statistics and then exit (default: %s)", params.show_statistics ? "true" : "false"),
        [](common_params & params) {
            params.show_statistics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--parse-special"},
        string_format("parse special tokens (chat, tool, etc) (default: %s)", params.parse_special ? "true" : "false"),
        [](common_params & params) {
            params.parse_special = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
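    // llama-bench options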
    add_opt(common_arg(
        {"-pps"},
        string_format("whether the prompt is shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false"),
        [](common_params & params) {
            params.is_pp_shared = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH, LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-tgs"},
        string_format("whether text generation is separated across the different sequences (default: %s)", params.is_tg_separate ? "true" : "false"),
        [](common_params & params) {
            params.is_tg_separate = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH, LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-npp"}, "n0,n1,...",
        "number of prompt tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-ntg"}, "n0,n1,...",
        "number of text generation tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-npl"}, "n0,n1,...",
        "number of parallel prompts",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
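    // embedding output options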
    add_opt(common_arg(
        {"--embd-normalize"}, "N",
        string_format("normalisation for embeddings (default: %d) (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)", params.embd_normalize),
        [](common_params & params, int value) {
            params.embd_normalize = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--embd-output-format"}, "FORMAT",
        "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix, \"raw\" = plain whitespace-delimited output (one embedding per line)",
        [](common_params & params, const std::string & value) {
            params.embd_out = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--embd-separator"}, "STRING",
        "separator of embeddings (default \\n) for example \"<#sep#>\"",
        [](common_params & params, const std::string & value) {
            params.embd_sep = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--cls-separator"}, "STRING",
        "separator of classification sequences (default \\t) for example \"<#seq#>\"",
        [](common_params & params, const std::string & value) {
            params.cls_sep = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
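    // usage sketch (hypothetical model path): embed two prompts separated by a custom
    // marker, euclidean-normalized, emitted as a JSON-style array:
    //   llama-embedding -m embd-model.gguf -p "first<#sep#>second" \
    //       --embd-separator "<#sep#>" --embd-normalize 2 --embd-output-format array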
    add_opt(common_arg(
        {"--host"}, "HOST",
        string_format("IP address to listen on, or bind to a UNIX socket if the address ends with .sock (default: %s)", params.hostname.c_str()),
        [](common_params & params, const std::string & value) {
            params.hostname = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_HOST"));
    add_opt(common_arg(
        {"--port"}, "PORT",
        string_format("port to listen on (default: %d)", params.port),
        [](common_params & params, int value) {
            params.port = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_PORT"));
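    // the same binding can come from flags or from the environment, e.g.
    // (hypothetical model path):
    //   llama-server -m model.gguf --host 0.0.0.0 --port 8080
    //   LLAMA_ARG_HOST=0.0.0.0 LLAMA_ARG_PORT=8080 llama-server -m model.gguf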
    add_opt(common_arg(
        {"--path"}, "PATH",
        string_format("path to serve static files from (default: %s)", params.public_path.c_str()),
        [](common_params & params, const std::string & value) {
            params.public_path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH"));
    add_opt(common_arg(
        {"--api-prefix"}, "PREFIX",
        string_format("prefix path the server serves from, without the trailing slash (default: %s)", params.api_prefix.c_str()),
        [](common_params & params, const std::string & value) {
            params.api_prefix = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_API_PREFIX"));
    add_opt(common_arg(
        {"--no-webui"},
        string_format("Disable the Web UI (default: %s)", params.webui ? "enabled" : "disabled"),
        [](common_params & params) {
            params.webui = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_WEBUI"));
    add_opt(common_arg(
        {"--embedding", "--embeddings"},
        string_format("restrict to only support embedding use case; use only with dedicated embedding models (default: %s)", params.embedding ? "enabled" : "disabled"),
        [](common_params & params) {
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS"));
    add_opt(common_arg(
        {"--reranking", "--rerank"},
        string_format("enable reranking endpoint on server (default: %s)", "disabled"),
        [](common_params & params) {
            params.embedding = true;
            params.pooling_type = LLAMA_POOLING_TYPE_RANK;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING"));
    add_opt(common_arg(
        {"--api-key"}, "KEY",
        "API key to use for authentication (default: none)",
        [](common_params & params, const std::string & value) {
            params.api_keys.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_API_KEY"));
    add_opt(common_arg(
        {"--api-key-file"}, "FNAME",
        "path to file containing API keys (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream key_file(value);
            if (!key_file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (!key.empty()) {
                    params.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
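    // the key file is read line by line: empty lines are skipped and every other line
    // is taken as one API key, so a file containing the (hypothetical) keys
    //   sk-key-one
    //   sk-key-two
    // is equivalent to passing --api-key twice.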
    add_opt(common_arg(
        {"--ssl-key-file"}, "FNAME",
        "path to file containing a PEM-encoded SSL private key",
        [](common_params & params, const std::string & value) {
            params.ssl_file_key = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_KEY_FILE"));
    add_opt(common_arg(
        {"--ssl-cert-file"}, "FNAME",
        "path to file containing a PEM-encoded SSL certificate",
        [](common_params & params, const std::string & value) {
            params.ssl_file_cert = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_CERT_FILE"));
    add_opt(common_arg(
        {"--chat-template-kwargs"}, "STRING",
        "sets additional params for the chat template parser, given as a JSON object",
        [](common_params & params, const std::string & value) {
            auto parsed = json::parse(value);
            for (const auto & item : parsed.items()) {
                params.default_template_kwargs[item.key()] = item.value().dump();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_CHAT_TEMPLATE_KWARGS"));
    add_opt(common_arg(
        {"-to", "--timeout"}, "N",
        string_format("server read/write timeout in seconds (default: %d)", params.timeout_read),
        [](common_params & params, int value) {
            params.timeout_read  = value;
            params.timeout_write = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_TIMEOUT"));
    add_opt(common_arg(
        {"--threads-http"}, "N",
        string_format("number of threads used to process HTTP requests (default: %d)", params.n_threads_http),
        [](common_params & params, int value) {
            params.n_threads_http = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
    add_opt(common_arg(
        {"--cache-reuse"}, "N",
        string_format(
            "min chunk size to attempt reusing from the cache via KV shifting (default: %d)\n"
            "[(card)](https://ggml.ai/f0.png)", params.n_cache_reuse
        ),
        [](common_params & params, int value) {
            params.n_cache_reuse = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE"));
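    // usage sketch (hypothetical model path): let the server reuse cached KV chunks of
    // at least 256 tokens via KV shifting instead of reprocessing them:
    //   llama-server -m model.gguf --cache-reuse 256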
    add_opt(common_arg(
        {"--metrics"},
        string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_metrics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
    add_opt(common_arg(
        {"--props"},
        string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_props = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS"));
    add_opt(common_arg(
        {"--slots"},
        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_slots = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--no-slots"},
        "disables slots monitoring endpoint",
        [](common_params & params) {
            params.endpoint_slots = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--slot-save-path"}, "PATH",
        "path to save slot kv cache (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.slot_save_path = value;
            if (!fs_is_directory(params.slot_save_path)) {
                throw std::invalid_argument("not a directory: " + value);
            }
            // if the path doesn't end with DIRECTORY_SEPARATOR, add it
            if (!params.slot_save_path.empty() && params.slot_save_path[params.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
                params.slot_save_path += DIRECTORY_SEPARATOR;
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--media-path"}, "PATH",
        "directory for loading local media files; files can be accessed via file:// URLs using relative paths (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.media_path = value;
            if (!fs_is_directory(params.media_path)) {
                throw std::invalid_argument("not a directory: " + value);
            }
            // if the path doesn't end with DIRECTORY_SEPARATOR, add it
            if (!params.media_path.empty() && params.media_path[params.media_path.size() - 1] != DIRECTORY_SEPARATOR) {
                params.media_path += DIRECTORY_SEPARATOR;
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--models-dir"}, "PATH",
        "directory containing models for the router server (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.models_dir = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODELS_DIR"));
    add_opt(common_arg(
        {"--models-preset"}, "PATH",
        "path to INI file containing model presets for the router server (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.models_preset = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODELS_PRESET"));
    add_opt(common_arg(
        {"--models-max"}, "N",
        string_format("for router server, maximum number of models to load simultaneously (default: %d, 0 = unlimited)", params.models_max),
        [](common_params & params, int value) {
            params.models_max = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODELS_MAX"));
    add_opt(common_arg(
        {"--no-models-autoload"},
        "disables automatic loading of models (default: enabled)",
        [](common_params & params) {
            params.models_autoload = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_MODELS_AUTOLOAD"));
    add_opt(common_arg(
        {"--jinja"},
        string_format("use jinja template for chat (default: %s)", params.use_jinja ? "enabled" : "disabled"),
        [](common_params & params) {
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_JINJA"));
    add_opt(common_arg(
        {"--no-jinja"},
        string_format("disable jinja template for chat (default: %s)", params.use_jinja ? "disabled" : "enabled"),
        [](common_params & params) {
            params.use_jinja = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_NO_JINJA"));
    add_opt(common_arg(
        {"--reasoning-format"}, "FORMAT",
        "controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:\n"
        "- none: leaves thoughts unparsed in `message.content`\n"
        "- deepseek: puts thoughts in `message.reasoning_content`\n"
        "- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`\n"
        "(default: auto)",
        [](common_params & params, const std::string & value) {
            params.reasoning_format = common_reasoning_format_from_name(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_THINK"));
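    // usage sketch (hypothetical model path): return DeepSeek-style reasoning in
    // `message.reasoning_content` instead of inline `<think>` tags:
    //   llama-server -m model.gguf --jinja --reasoning-format deepseek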
    add_opt(common_arg(
        {"--reasoning-budget"}, "N",
        "controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)",
        [](common_params & params, int value) {
            if (value != 0 && value != -1) { throw std::invalid_argument("invalid value"); }
            params.reasoning_budget = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_THINK_BUDGET"));
    add_opt(common_arg(
        {"--chat-template"}, "JINJA_TEMPLATE",
        string_format(
            "set custom jinja chat template (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            params.chat_template = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
    add_opt(common_arg(
        {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
        string_format(
            "set custom jinja chat template file (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            params.chat_template = read_file(value);
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE_FILE"));
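    // usage sketch (hypothetical template path): override the template baked into the
    // model's metadata with a Jinja template loaded from disk:
    //   llama-server -m model.gguf --jinja --chat-template-file mytemplate.jinja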
    add_opt(common_arg(
        {"--no-prefill-assistant"},
        string_format(
            "whether to prefill the assistant's response if the last message is an assistant message (default: prefill enabled)\n"
            "when this flag is set, if the last message is an assistant message then it will be treated as a full message and not prefilled\n"
        ),
        [](common_params & params) {
            params.prefill_assistant = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_PREFILL_ASSISTANT"));
    add_opt(common_arg(
        {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
        string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),
        [](common_params & params, const std::string & value) {
            params.slot_prompt_similarity = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--lora-init-without-apply"},
        string_format("load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? "enabled" : "disabled"),
        [](common_params & params) {
            params.lora_init_without_apply = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--simple-io"},
        "use basic IO for better compatibility in subprocesses and limited consoles",
        [](common_params & params) {
            params.simple_io = true;
        }
    ).set_examples({LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"--positive-file"}, "FNAME",
        string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_positive_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--negative-file"}, "FNAME",
        string_format("negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_negative_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-batch"}, "N",
        string_format("batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch),
        [](common_params & params, int value) {
            params.n_pca_batch = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-iter"}, "N",
        string_format("number of iterations used for PCA (default: %d)", params.n_pca_iterations),
        [](common_params & params, int value) {
            params.n_pca_iterations = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--method"}, "{pca, mean}",
        "dimensionality reduction method to be used (default: pca)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "pca")  { params.cvector_dimre_method = DIMRE_METHOD_PCA;  }
            else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--output-format"}, "{md,jsonl}",
        "output format for batched-bench results (default: md)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "jsonl") { params.batched_bench_output_jsonl = true;  }
            else if (value == "md")    { params.batched_bench_output_jsonl = false; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"--log-disable"},
        "Disable logging",
        [](common_params &) {
            common_log_pause(common_log_main());
        }
    ));
    add_opt(common_arg(
        {"--log-file"}, "FNAME",
        "Log to file",
        [](common_params &, const std::string & value) {
            common_log_set_file(common_log_main(), value.c_str());
        }
    ).set_env("LLAMA_LOG_FILE"));
    add_opt(common_arg(
        {"--log-colors"}, "[on|off|auto]",
        "Set colored logging ('on', 'off', or 'auto', default: 'auto')\n"
        "'auto' enables colors when output is to a terminal",
        [](common_params &, const std::string & value) {
            if (is_truthy(value)) {
                common_log_set_colors(common_log_main(), LOG_COLORS_ENABLED);
            } else if (is_falsey(value)) {
                common_log_set_colors(common_log_main(), LOG_COLORS_DISABLED);
            } else if (is_autoy(value)) {
                common_log_set_colors(common_log_main(), LOG_COLORS_AUTO);
            } else {
                throw std::invalid_argument(
                    string_format("error: unknown value for --log-colors: '%s'\n", value.c_str()));
            }
        }
    ).set_env("LLAMA_LOG_COLORS"));
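    // the same setting can come from the flag or the environment, e.g.
    // (hypothetical model path):
    //   llama-cli -m model.gguf --log-colors off
    //   LLAMA_LOG_COLORS=off llama-cli -m model.gguf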
    add_opt(common_arg(
        {"-v", "--verbose", "--log-verbose"},
        "Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
        [](common_params & params) {
            params.verbosity = INT_MAX;
        }
    ));
    add_opt(common_arg(
        {"--offline"},
        "Offline mode: forces use of cache, prevents network access",
        [](common_params & params) {
            params.offline = true;
        }
    ).set_env("LLAMA_OFFLINE"));
    add_opt(common_arg(
        {"-lv", "--verbosity", "--log-verbosity"}, "N",
        string_format("Set the verbosity threshold. Messages with a higher verbosity will be ignored. Values:\n"
            " - 0: generic output\n"
            " - 1: error\n"
            " - 2: warning\n"
            " - 3: info\n"
            " - 4: debug\n"
            "(default: %d)\n", params.verbosity),
        [](common_params & params, int value) {
            params.verbosity = value;
        }
    ).set_env("LLAMA_LOG_VERBOSITY"));
    add_opt(common_arg(
        {"--log-prefix"},
        "Enable prefix in log messages",
        [](common_params &) {
            common_log_set_prefix(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_PREFIX"));
    add_opt(common_arg(
        {"--log-timestamps"},
        "Enable timestamps in log messages",
        [](common_params &) {
            common_log_set_timestamps(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_TIMESTAMPS"));
    // speculative parameters
    add_opt(common_arg(
        {"-td", "--threads-draft"}, "N",
        "number of threads to use during generation for the draft model (default: same as --threads)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.n_threads = value;
            if (params.speculative.cpuparams.n_threads <= 0) {
                params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-tbd", "--threads-batch-draft"}, "N",
        "number of threads to use during batch and prompt processing (default: same as --threads-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.n_threads = value;
            if (params.speculative.cpuparams_batch.n_threads <= 0) {
                params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-Cd", "--cpu-mask-draft"}, "M",
        "Draft model CPU affinity mask. Complements --cpu-range-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crd", "--cpu-range-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: same as --poll)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Cbd", "--cpu-mask-batch-draft"}, "M",
        "Draft model CPU affinity mask for batch processing. Complements --cpu-range-batch-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-batch-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-batch-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: same as --cpu-strict-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-batch-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-batch-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: same as --poll-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--draft-max", "--draft", "--draft-n"}, "N",
        string_format("number of tokens to draft for speculative decoding (default: %d)", params.speculative.n_max),
        [](common_params & params, int value) {
            params.speculative.n_max = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_DRAFT_MAX"));
    add_opt(common_arg(
        {"--draft-min", "--draft-n-min"}, "N",
        string_format("minimum number of draft tokens to use for speculative decoding (default: %d)", params.speculative.n_min),
        [](common_params & params, int value) {
            params.speculative.n_min = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_DRAFT_MIN"));
    add_opt(common_arg(
        {"--draft-p-split"}, "P",
        string_format("speculative decoding split probability (default: %.1f)", (double)params.speculative.p_split),
        [](common_params & params, const std::string & value) {
            params.speculative.p_split = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}).set_env("LLAMA_ARG_DRAFT_P_SPLIT"));
    add_opt(common_arg(
        {"--draft-p-min"}, "P",
        string_format("minimum speculative decoding probability (greedy) (default: %.1f)", (double)params.speculative.p_min),
        [](common_params & params, const std::string & value) {
            params.speculative.p_min = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_DRAFT_P_MIN"));
    add_opt(common_arg(
        {"-cd", "--ctx-size-draft"}, "N",
        string_format("size of the prompt context for the draft model (default: %d, 0 = loaded from model)", params.speculative.n_ctx),
        [](common_params & params, int value) {
            params.speculative.n_ctx = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_CTX_SIZE_DRAFT"));
    add_opt(common_arg(
        {"-devd", "--device-draft"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading the draft model (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.speculative.devices = parse_device_list(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N",
        "number of layers to store in VRAM for the draft model",
        [](common_params & params, int value) {
            params.speculative.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_N_GPU_LAYERS_DRAFT"));
    add_opt(common_arg(
        {"-md", "--model-draft"}, "FNAME",
        "draft model for speculative decoding (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}).set_env("LLAMA_ARG_MODEL_DRAFT"));
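    // usage sketch (hypothetical model paths): speculative decoding with a small draft
    // model, proposing between 4 and 16 draft tokens per step:
    //   llama-speculative -m target.gguf -md draft.gguf --draft-min 4 --draft-max 16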
    add_opt(common_arg(
        {"--spec-replace"}, "TARGET", "DRAFT",
        "translate the string in TARGET into DRAFT if the draft model and main model are not compatible",
        [](common_params & params, const std::string & tgt, const std::string & dft) {
            params.speculative.replacements.push_back({ tgt, dft });
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"-ctkd", "--cache-type-k-draft"}, "TYPE",
        string_format(
            "KV cache data type for K for the draft model\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.speculative.cache_type_k)
        ),
        [](common_params & params, const std::string & value) {
            params.speculative.cache_type_k = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_K_DRAFT"));
    add_opt(common_arg(
        {"-ctvd", "--cache-type-v-draft"}, "TYPE",
        string_format(
            "KV cache data type for V for the draft model\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.speculative.cache_type_v)
        ),
        [](common_params & params, const std::string & value) {
            params.speculative.cache_type_v = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_V_DRAFT"));
    add_opt(common_arg(
        {"-mv", "--model-vocoder"}, "FNAME",
        "vocoder model for audio generation (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-use-guide-tokens"},
        "Use guide tokens to improve TTS word recall",
        [](common_params & params) {
            params.vocoder.use_guide_tokens = true;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-speaker-file"}, "FNAME",
        "speaker file path for audio generation",
        [](common_params & params, const std::string & value) {
            params.vocoder.speaker_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));
    add_opt(common_arg(
        {"--diffusion-steps"}, "N",
        string_format("number of diffusion steps (default: %d)", params.diffusion.steps),
        [](common_params & params, int value) { params.diffusion.steps = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-visual"},
        string_format("enable visual diffusion mode (show progressive generation) (default: %s)", params.diffusion.visual_mode ? "true" : "false"),
        [](common_params & params) { params.diffusion.visual_mode = true; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-eps"}, "F",
        string_format("epsilon for timesteps (default: %.6f)", (double) params.diffusion.eps),
        [](common_params & params, const std::string & value) { params.diffusion.eps = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-algorithm"}, "N",
        string_format("diffusion algorithm: 0=ORIGIN, 1=ENTROPY_BASED, 2=MARGIN_BASED, 3=RANDOM, 4=LOW_CONFIDENCE (default: %d)", params.diffusion.algorithm),
        [](common_params & params, int value) { params.diffusion.algorithm = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-alg-temp"}, "F",
        string_format("dream algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp),
        [](common_params & params, const std::string & value) { params.diffusion.alg_temp = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-block-length"}, "N",
        string_format("llada block length for generation (default: %d)", params.diffusion.block_length),
        [](common_params & params, int value) { params.diffusion.block_length = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-cfg-scale"}, "F",
        string_format("llada classifier-free guidance scale (default: %.3f)", (double) params.diffusion.cfg_scale),
        [](common_params & params, const std::string & value) { params.diffusion.cfg_scale = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-add-gumbel-noise"}, "F",
        string_format("add gumbel noise to the logits if temp > 0.0 (default: %s)", params.diffusion.add_gumbel_noise ? "true" : "false"),
        [](common_params & params, const std::string & value) { params.diffusion.add_gumbel_noise = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
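    // usage sketch (hypothetical model path; assumes the diffusion example binary,
    // llama-diffusion-cli): 64 steps with the low-confidence algorithm and visual output:
    //   llama-diffusion-cli -m dream.gguf -p "..." --diffusion-steps 64 \
    //       --diffusion-algorithm 4 --diffusion-visual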
    add_opt(common_arg(
        {"-lr", "--learning-rate"}, "ALPHA",
        string_format("adamw or sgd optimizer alpha (default: %.2g); note: for sgd (no momentum) a ~10x larger alpha is recommended", (double) params.lr.lr0),
        [](common_params & params, const std::string & value) { params.lr.lr0 = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-lr-min", "--learning-rate-min"}, "ALPHA",
        string_format("(if >0) final learning rate after decay (if -decay-epochs is set, default=%.2g)", (double) params.lr.lr_min),
        [](common_params & params, const std::string & value) { params.lr.lr_min = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-decay-epochs", "--learning-rate-decay-epochs"}, "ALPHA",
        string_format("(if >0) decay learning rate to -lr-min after this many epochs (exponential decay, default=%.2g)", (double) params.lr.decay_epochs),
        [](common_params & params, const std::string & value) { params.lr.decay_epochs = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-wd", "--weight-decay"}, "WD",
        string_format("adamw or sgd optimizer weight decay (0 is off; recommend very small e.g. 1e-9) (default: %.2g).", (double) params.lr.wd),
        [](common_params & params, const std::string & value) { params.lr.wd = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-val-split", "--val-split"}, "FRACTION",
        string_format("fraction of data to use as validation set for training (default: %.2g).", (double) params.val_split),
        [](common_params & params, const std::string & value) { params.val_split = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-epochs", "--epochs"}, "N",
        string_format("optimizer max # of epochs (default: %d)", params.lr.epochs),
        [](common_params & params, int epochs) { params.lr.epochs = epochs; }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-opt", "--optimizer"}, "sgd|adamw", "adamw or sgd",
        [](common_params & params, const std::string & name) {
            params.optimizer = common_opt_get_optimizer(name.c_str());
            if (params.optimizer == GGML_OPT_OPTIMIZER_TYPE_COUNT) {
                throw std::invalid_argument("invalid --optimizer, valid options: adamw, sgd");
            }
        }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
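    // usage sketch (hypothetical model/data paths; assumes the llama-finetune example
    // binary): fine-tune with SGD, using the ~10x-larger alpha suggested above:
    //   llama-finetune -m model.gguf -f train.txt -opt sgd -lr 1e-4 -epochs 2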
    // presets
    add_opt(common_arg(
        {"--tts-oute-default"},
        string_format("use default OuteTTS models (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "OuteAI/OuteTTS-0.2-500M-GGUF";
            params.model.hf_file = "OuteTTS-0.2-500M-Q8_0.gguf";
            params.vocoder.model.hf_repo = "ggml-org/WavTokenizer";
            params.vocoder.model.hf_file = "WavTokenizer-Large-75-F16.gguf";
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));
    add_opt(common_arg(
        {"--embd-gemma-default"},
        string_format("use default EmbeddingGemma model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/embeddinggemma-300M-qat-q4_0-GGUF";
            params.model.hf_file = "embeddinggemma-300M-qat-Q4_0.gguf";
            params.port = 8011;
            params.n_ubatch = 2048;
            params.n_batch = 2048;
            params.n_parallel = 32;
            params.n_ctx = 2048*params.n_parallel;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-1.5b-default"},
        string_format("use default Qwen 2.5 Coder 1.5B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
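    // the presets above and below are plain flag bundles: e.g. --fim-qwen-1.5b-default
    // is shorthand for selecting the ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF weights and
    // setting the matching port/batch/cache-reuse values by hand:
    //   llama-server --fim-qwen-1.5b-default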
    add_opt(common_arg(
        {"--fim-qwen-3b-default"},
        string_format("use default Qwen 2.5 Coder 3B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-default"},
        string_format("use default Qwen 2.5 Coder 7B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-spec"},
        string_format("use Qwen 2.5 Coder 7B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-14b-spec"},
        string_format("use Qwen 2.5 Coder 14B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-14B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-30b-default"},
        string_format("use default Qwen 3 Coder 30B A3B Instruct (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
            params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--gpt-oss-20b-default"},
        string_format("use gpt-oss-20b (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gpt-oss-20b-GGUF";
            params.model.hf_file = "gpt-oss-20b-mxfp4.gguf";
            params.port = 8013;
            params.n_ubatch = 2048;
            params.n_batch = 32768;
            params.n_parallel = 2;
            params.n_ctx = 131072*params.n_parallel;
            params.sampling.temp = 1.0f;
            params.sampling.top_p = 1.0f;
            params.sampling.top_k = 0;
            params.sampling.min_p = 0.01f;
            params.use_jinja = true;
            //params.default_template_kwargs["reasoning_effort"] = "\"high\"";
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"--gpt-oss-120b-default"},
        string_format("use gpt-oss-120b (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gpt-oss-120b-GGUF";
            params.port = 8013;
            params.n_ubatch = 2048;
            params.n_batch = 32768;
            params.n_parallel = 2;
            params.n_ctx = 131072*params.n_parallel;
            params.sampling.temp = 1.0f;
            params.sampling.top_p = 1.0f;
            params.sampling.top_k = 0;
            params.sampling.min_p = 0.01f;
            params.use_jinja = true;
            //params.default_template_kwargs["reasoning_effort"] = "\"high\"";
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"--vision-gemma-4b-default"},
        string_format("use Gemma 3 4B QAT (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gemma-3-4b-it-qat-GGUF";
            params.port = 8014;
            params.n_ctx = 0;
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
    add_opt(common_arg(
        {"--vision-gemma-12b-default"},
        string_format("use Gemma 3 12B QAT (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gemma-3-12b-it-qat-GGUF";
            params.port = 8014;
            params.n_ctx = 0;
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
    return ctx_arg;
}