arg.cpp 141 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
29442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212
  1. #include "arg.h"
  2. #include "chat.h"
  3. #include "common.h"
  4. #include "json-schema-to-grammar.h"
  5. #include "log.h"
  6. #include "sampling.h"
  7. #include "download.h"
  8. // fix problem with std::min and std::max
  9. #if defined(_WIN32)
  10. #define WIN32_LEAN_AND_MEAN
  11. #ifndef NOMINMAX
  12. # define NOMINMAX
  13. #endif
  14. #include <windows.h>
  15. #endif
  16. #define JSON_ASSERT GGML_ASSERT
  17. #include <nlohmann/json.hpp>
  18. #include <algorithm>
  19. #include <climits>
  20. #include <cstdarg>
  21. #include <fstream>
  22. #include <list>
  23. #include <regex>
  24. #include <set>
  25. #include <string>
  26. #include <thread> // for hardware_concurrency
  27. #include <vector>
  28. #ifdef __linux__
  29. #include <linux/limits.h>
  30. #elif defined(_WIN32)
  31. # if !defined(PATH_MAX)
  32. # define PATH_MAX MAX_PATH
  33. # endif
  34. #elif defined(_AIX)
  35. #include <sys/limits.h>
  36. #else
  37. #include <sys/syslimits.h>
  38. #endif
  39. #define LLAMA_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
  40. using json = nlohmann::ordered_json;
  41. static std::initializer_list<enum llama_example> mmproj_examples = {
  42. LLAMA_EXAMPLE_MTMD,
  43. LLAMA_EXAMPLE_SERVER,
  44. };
  45. static std::string read_file(const std::string & fname) {
  46. std::ifstream file(fname);
  47. if (!file) {
  48. throw std::runtime_error(string_format("error: failed to open file '%s'\n", fname.c_str()));
  49. }
  50. std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
  51. file.close();
  52. return content;
  53. }
  54. common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) {
  55. this->examples = examples;
  56. return *this;
  57. }
  58. common_arg & common_arg::set_excludes(std::initializer_list<enum llama_example> excludes) {
  59. this->excludes = excludes;
  60. return *this;
  61. }
  62. common_arg & common_arg::set_env(const char * env) {
  63. help = help + "\n(env: " + env + ")";
  64. this->env = env;
  65. return *this;
  66. }
  67. common_arg & common_arg::set_sparam() {
  68. is_sparam = true;
  69. return *this;
  70. }
  71. bool common_arg::in_example(enum llama_example ex) {
  72. return examples.find(ex) != examples.end();
  73. }
  74. bool common_arg::is_exclude(enum llama_example ex) {
  75. return excludes.find(ex) != excludes.end();
  76. }
  77. bool common_arg::get_value_from_env(std::string & output) const {
  78. if (env == nullptr) return false;
  79. char * value = std::getenv(env);
  80. if (value) {
  81. output = value;
  82. return true;
  83. }
  84. return false;
  85. }
  86. bool common_arg::has_value_from_env() const {
  87. return env != nullptr && std::getenv(env);
  88. }
  89. static std::vector<std::string> break_str_into_lines(std::string input, size_t max_char_per_line) {
  90. std::vector<std::string> result;
  91. std::istringstream iss(input);
  92. std::string line;
  93. auto add_line = [&](const std::string& l) {
  94. if (l.length() <= max_char_per_line) {
  95. result.push_back(l);
  96. } else {
  97. std::istringstream line_stream(l);
  98. std::string word, current_line;
  99. while (line_stream >> word) {
  100. if (current_line.length() + !current_line.empty() + word.length() > max_char_per_line) {
  101. if (!current_line.empty()) result.push_back(current_line);
  102. current_line = word;
  103. } else {
  104. current_line += (!current_line.empty() ? " " : "") + word;
  105. }
  106. }
  107. if (!current_line.empty()) result.push_back(current_line);
  108. }
  109. };
  110. while (std::getline(iss, line)) {
  111. add_line(line);
  112. }
  113. return result;
  114. }
  115. std::string common_arg::to_string() {
  116. // params for printing to console
  117. const static int n_leading_spaces = 40;
  118. const static int n_char_per_line_help = 70; // TODO: detect this based on current console
  119. std::string leading_spaces(n_leading_spaces, ' ');
  120. std::ostringstream ss;
  121. for (const auto arg : args) {
  122. if (arg == args.front()) {
  123. if (args.size() == 1) {
  124. ss << arg;
  125. } else {
  126. // first arg is usually abbreviation, we need padding to make it more beautiful
  127. auto tmp = std::string(arg) + ", ";
  128. auto spaces = std::string(std::max(0, 7 - (int)tmp.size()), ' ');
  129. ss << tmp << spaces;
  130. }
  131. } else {
  132. ss << arg << (arg != args.back() ? ", " : "");
  133. }
  134. }
  135. if (value_hint) ss << " " << value_hint;
  136. if (value_hint_2) ss << " " << value_hint_2;
  137. if (ss.tellp() > n_leading_spaces - 3) {
  138. // current line is too long, add new line
  139. ss << "\n" << leading_spaces;
  140. } else {
  141. // padding between arg and help, same line
  142. ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
  143. }
  144. const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
  145. for (const auto & line : help_lines) {
  146. ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
  147. }
  148. return ss.str();
  149. }
  150. //
  151. // utils
  152. //
  153. // Helper function to parse tensor buffer override strings
  154. static void parse_tensor_buffer_overrides(const std::string & value, std::vector<llama_model_tensor_buft_override> & overrides) {
  155. std::map<std::string, ggml_backend_buffer_type_t> buft_list;
  156. for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
  157. auto * dev = ggml_backend_dev_get(i);
  158. auto * buft = ggml_backend_dev_buffer_type(dev);
  159. if (buft) {
  160. buft_list[ggml_backend_buft_name(buft)] = buft;
  161. }
  162. }
  163. for (const auto & override : string_split<std::string>(value, ',')) {
  164. std::string::size_type pos = override.find('=');
  165. if (pos == std::string::npos) {
  166. throw std::invalid_argument("invalid value");
  167. }
  168. std::string tensor_name = override.substr(0, pos);
  169. std::string buffer_type = override.substr(pos + 1);
  170. if (buft_list.find(buffer_type) == buft_list.end()) {
  171. printf("Available buffer types:\n");
  172. for (const auto & it : buft_list) {
  173. printf(" %s\n", ggml_backend_buft_name(it.second));
  174. }
  175. throw std::invalid_argument("unknown buffer type");
  176. }
  177. // keep strings alive and avoid leaking memory by storing them in a static vector
  178. static std::list<std::string> buft_overrides;
  179. buft_overrides.push_back(tensor_name);
  180. overrides.push_back({buft_overrides.back().c_str(), buft_list.at(buffer_type)});
  181. }
  182. }
  183. struct handle_model_result {
  184. bool found_mmproj = false;
  185. common_params_model mmproj;
  186. };
  187. static handle_model_result common_params_handle_model(
  188. struct common_params_model & model,
  189. const std::string & bearer_token,
  190. bool offline) {
  191. handle_model_result result;
  192. // handle pre-fill default model path and url based on hf_repo and hf_file
  193. {
  194. if (!model.docker_repo.empty()) { // Handle Docker URLs by resolving them to local paths
  195. model.path = common_docker_resolve_model(model.docker_repo);
  196. model.name = model.docker_repo; // set name for consistency
  197. } else if (!model.hf_repo.empty()) {
  198. // short-hand to avoid specifying --hf-file -> default it to --model
  199. if (model.hf_file.empty()) {
  200. if (model.path.empty()) {
  201. auto auto_detected = common_get_hf_file(model.hf_repo, bearer_token, offline);
  202. if (auto_detected.repo.empty() || auto_detected.ggufFile.empty()) {
  203. exit(1); // built without CURL, error message already printed
  204. }
  205. model.name = model.hf_repo; // repo name with tag
  206. model.hf_repo = auto_detected.repo; // repo name without tag
  207. model.hf_file = auto_detected.ggufFile;
  208. if (!auto_detected.mmprojFile.empty()) {
  209. result.found_mmproj = true;
  210. result.mmproj.hf_repo = model.hf_repo;
  211. result.mmproj.hf_file = auto_detected.mmprojFile;
  212. }
  213. } else {
  214. model.hf_file = model.path;
  215. }
  216. }
  217. std::string model_endpoint = get_model_endpoint();
  218. model.url = model_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
  219. // make sure model path is present (for caching purposes)
  220. if (model.path.empty()) {
  221. // this is to avoid different repo having same file name, or same file name in different subdirs
  222. std::string filename = model.hf_repo + "_" + model.hf_file;
  223. // to make sure we don't have any slashes in the filename
  224. string_replace_all(filename, "/", "_");
  225. model.path = fs_get_cache_file(filename);
  226. }
  227. } else if (!model.url.empty()) {
  228. if (model.path.empty()) {
  229. auto f = string_split<std::string>(model.url, '#').front();
  230. f = string_split<std::string>(f, '?').front();
  231. model.path = fs_get_cache_file(string_split<std::string>(f, '/').back());
  232. }
  233. }
  234. }
  235. // then, download it if needed
  236. if (!model.url.empty()) {
  237. bool ok = common_download_model(model, bearer_token, offline);
  238. if (!ok) {
  239. LOG_ERR("error: failed to download model from %s\n", model.url.c_str());
  240. exit(1);
  241. }
  242. }
  243. return result;
  244. }
  245. const std::vector<ggml_type> kv_cache_types = {
  246. GGML_TYPE_F32,
  247. GGML_TYPE_F16,
  248. GGML_TYPE_BF16,
  249. GGML_TYPE_Q8_0,
  250. GGML_TYPE_Q4_0,
  251. GGML_TYPE_Q4_1,
  252. GGML_TYPE_IQ4_NL,
  253. GGML_TYPE_Q5_0,
  254. GGML_TYPE_Q5_1,
  255. };
  256. static ggml_type kv_cache_type_from_str(const std::string & s) {
  257. for (const auto & type : kv_cache_types) {
  258. if (ggml_type_name(type) == s) {
  259. return type;
  260. }
  261. }
  262. throw std::runtime_error("Unsupported cache type: " + s);
  263. }
  264. static std::string get_all_kv_cache_types() {
  265. std::ostringstream msg;
  266. for (const auto & type : kv_cache_types) {
  267. msg << ggml_type_name(type) << (&type == &kv_cache_types.back() ? "" : ", ");
  268. }
  269. return msg.str();
  270. }
  271. //
  272. // CLI argument parsing functions
  273. //
  274. static bool common_params_parse_ex(int argc, char ** argv, common_params_context & ctx_arg) {
  275. common_params & params = ctx_arg.params;
  276. std::unordered_map<std::string, common_arg *> arg_to_options;
  277. for (auto & opt : ctx_arg.options) {
  278. for (const auto & arg : opt.args) {
  279. arg_to_options[arg] = &opt;
  280. }
  281. }
  282. // handle environment variables
  283. for (auto & opt : ctx_arg.options) {
  284. std::string value;
  285. if (opt.get_value_from_env(value)) {
  286. try {
  287. if (opt.handler_void && (value == "1" || value == "true")) {
  288. opt.handler_void(params);
  289. }
  290. if (opt.handler_int) {
  291. opt.handler_int(params, std::stoi(value));
  292. }
  293. if (opt.handler_string) {
  294. opt.handler_string(params, value);
  295. continue;
  296. }
  297. } catch (std::exception & e) {
  298. throw std::invalid_argument(string_format(
  299. "error while handling environment variable \"%s\": %s\n\n", opt.env, e.what()));
  300. }
  301. }
  302. }
  303. // handle command line arguments
  304. auto check_arg = [&](int i) {
  305. if (i+1 >= argc) {
  306. throw std::invalid_argument("expected value for argument");
  307. }
  308. };
  309. for (int i = 1; i < argc; i++) {
  310. const std::string arg_prefix = "--";
  311. std::string arg = argv[i];
  312. if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
  313. std::replace(arg.begin(), arg.end(), '_', '-');
  314. }
  315. if (arg_to_options.find(arg) == arg_to_options.end()) {
  316. throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
  317. }
  318. auto opt = *arg_to_options[arg];
  319. if (opt.has_value_from_env()) {
  320. fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
  321. }
  322. try {
  323. if (opt.handler_void) {
  324. opt.handler_void(params);
  325. continue;
  326. }
  327. // arg with single value
  328. check_arg(i);
  329. std::string val = argv[++i];
  330. if (opt.handler_int) {
  331. opt.handler_int(params, std::stoi(val));
  332. continue;
  333. }
  334. if (opt.handler_string) {
  335. opt.handler_string(params, val);
  336. continue;
  337. }
  338. // arg with 2 values
  339. check_arg(i);
  340. std::string val2 = argv[++i];
  341. if (opt.handler_str_str) {
  342. opt.handler_str_str(params, val, val2);
  343. continue;
  344. }
  345. } catch (std::exception & e) {
  346. throw std::invalid_argument(string_format(
  347. "error while handling argument \"%s\": %s\n\n"
  348. "usage:\n%s\n\nto show complete usage, run with -h",
  349. arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
  350. }
  351. }
  352. postprocess_cpu_params(params.cpuparams, nullptr);
  353. postprocess_cpu_params(params.cpuparams_batch, &params.cpuparams);
  354. postprocess_cpu_params(params.speculative.cpuparams, &params.cpuparams);
  355. postprocess_cpu_params(params.speculative.cpuparams_batch, &params.cpuparams_batch);
  356. if (params.prompt_cache_all && (params.interactive || params.interactive_first)) {
  357. throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
  358. }
  359. // handle model and download
  360. {
  361. auto res = common_params_handle_model(params.model, params.hf_token, params.offline);
  362. if (params.no_mmproj) {
  363. params.mmproj = {};
  364. } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) {
  365. // optionally, handle mmproj model when -hf is specified
  366. params.mmproj = res.mmproj;
  367. }
  368. // only download mmproj if the current example is using it
  369. for (auto & ex : mmproj_examples) {
  370. if (ctx_arg.ex == ex) {
  371. common_params_handle_model(params.mmproj, params.hf_token, params.offline);
  372. break;
  373. }
  374. }
  375. common_params_handle_model(params.speculative.model, params.hf_token, params.offline);
  376. common_params_handle_model(params.vocoder.model, params.hf_token, params.offline);
  377. }
  378. // model is required (except for server)
  379. // TODO @ngxson : maybe show a list of available models in CLI in this case
  380. if (params.model.path.empty() && ctx_arg.ex != LLAMA_EXAMPLE_SERVER) {
  381. throw std::invalid_argument("error: --model is required\n");
  382. }
  383. if (params.escape) {
  384. string_process_escapes(params.prompt);
  385. string_process_escapes(params.input_prefix);
  386. string_process_escapes(params.input_suffix);
  387. for (auto & antiprompt : params.antiprompt) {
  388. string_process_escapes(antiprompt);
  389. }
  390. for (auto & seq_breaker : params.sampling.dry_sequence_breakers) {
  391. string_process_escapes(seq_breaker);
  392. }
  393. for (auto & pair : params.speculative.replacements) {
  394. string_process_escapes(pair.first);
  395. string_process_escapes(pair.second);
  396. }
  397. }
  398. if (!params.kv_overrides.empty()) {
  399. params.kv_overrides.emplace_back();
  400. params.kv_overrides.back().key[0] = 0;
  401. }
  402. if (!params.tensor_buft_overrides.empty()) {
  403. params.tensor_buft_overrides.push_back({nullptr, nullptr});
  404. }
  405. if (!params.speculative.tensor_buft_overrides.empty()) {
  406. params.speculative.tensor_buft_overrides.push_back({nullptr, nullptr});
  407. }
  408. if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) {
  409. throw std::runtime_error(string_format(
  410. "error: the supplied chat template is not supported: %s%s\n",
  411. params.chat_template.c_str(),
  412. params.use_jinja ? "" : "\nnote: llama.cpp was started without --jinja, we only support commonly used templates"
  413. ));
  414. }
  415. return true;
  416. }
  417. static void common_params_print_usage(common_params_context & ctx_arg) {
  418. auto print_options = [](std::vector<common_arg *> & options) {
  419. for (common_arg * opt : options) {
  420. printf("%s", opt->to_string().c_str());
  421. }
  422. };
  423. std::vector<common_arg *> common_options;
  424. std::vector<common_arg *> sparam_options;
  425. std::vector<common_arg *> specific_options;
  426. for (auto & opt : ctx_arg.options) {
  427. // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching current example
  428. if (opt.is_sparam) {
  429. sparam_options.push_back(&opt);
  430. } else if (opt.in_example(ctx_arg.ex)) {
  431. specific_options.push_back(&opt);
  432. } else {
  433. common_options.push_back(&opt);
  434. }
  435. }
  436. printf("----- common params -----\n\n");
  437. print_options(common_options);
  438. printf("\n\n----- sampling params -----\n\n");
  439. print_options(sparam_options);
  440. // TODO: maybe convert enum llama_example to string
  441. printf("\n\n----- example-specific params -----\n\n");
  442. print_options(specific_options);
  443. }
  444. static void common_params_print_completion(common_params_context & ctx_arg) {
  445. std::vector<common_arg *> common_options;
  446. std::vector<common_arg *> sparam_options;
  447. std::vector<common_arg *> specific_options;
  448. for (auto & opt : ctx_arg.options) {
  449. if (opt.is_sparam) {
  450. sparam_options.push_back(&opt);
  451. } else if (opt.in_example(ctx_arg.ex)) {
  452. specific_options.push_back(&opt);
  453. } else {
  454. common_options.push_back(&opt);
  455. }
  456. }
  457. printf("_llama_completions() {\n");
  458. printf(" local cur prev opts\n");
  459. printf(" COMPREPLY=()\n");
  460. printf(" cur=\"${COMP_WORDS[COMP_CWORD]}\"\n");
  461. printf(" prev=\"${COMP_WORDS[COMP_CWORD-1]}\"\n\n");
  462. printf(" opts=\"");
  463. auto print_options = [](const std::vector<common_arg *> & options) {
  464. for (const common_arg * opt : options) {
  465. for (const char * arg : opt->args) {
  466. printf("%s ", arg);
  467. }
  468. }
  469. };
  470. print_options(common_options);
  471. print_options(sparam_options);
  472. print_options(specific_options);
  473. printf("\"\n\n");
  474. printf(" case \"$prev\" in\n");
  475. printf(" --model|-m)\n");
  476. printf(" COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
  477. printf(" return 0\n");
  478. printf(" ;;\n");
  479. printf(" --grammar-file)\n");
  480. printf(" COMPREPLY=( $(compgen -f -X '!*.gbnf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
  481. printf(" return 0\n");
  482. printf(" ;;\n");
  483. printf(" --chat-template-file)\n");
  484. printf(" COMPREPLY=( $(compgen -f -X '!*.jinja' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
  485. printf(" return 0\n");
  486. printf(" ;;\n");
  487. printf(" *)\n");
  488. printf(" COMPREPLY=( $(compgen -W \"${opts}\" -- \"$cur\") )\n");
  489. printf(" return 0\n");
  490. printf(" ;;\n");
  491. printf(" esac\n");
  492. printf("}\n\n");
  493. std::set<std::string> executables = {
  494. "llama-batched",
  495. "llama-batched-bench",
  496. "llama-bench",
  497. "llama-cli",
  498. "llama-convert-llama2c-to-ggml",
  499. "llama-cvector-generator",
  500. "llama-embedding",
  501. "llama-eval-callback",
  502. "llama-export-lora",
  503. "llama-gen-docs",
  504. "llama-gguf",
  505. "llama-gguf-hash",
  506. "llama-gguf-split",
  507. "llama-gritlm",
  508. "llama-imatrix",
  509. "llama-infill",
  510. "llama-mtmd-cli",
  511. "llama-llava-clip-quantize-cli",
  512. "llama-lookahead",
  513. "llama-lookup",
  514. "llama-lookup-create",
  515. "llama-lookup-merge",
  516. "llama-lookup-stats",
  517. "llama-parallel",
  518. "llama-passkey",
  519. "llama-perplexity",
  520. "llama-q8dot",
  521. "llama-quantize",
  522. "llama-qwen2vl-cli",
  523. "llama-retrieval",
  524. "llama-run",
  525. "llama-save-load-state",
  526. "llama-server",
  527. "llama-simple",
  528. "llama-simple-chat",
  529. "llama-speculative",
  530. "llama-speculative-simple",
  531. "llama-tokenize",
  532. "llama-tts",
  533. "llama-vdot"
  534. };
  535. for (const auto& exe : executables) {
  536. printf("complete -F _llama_completions %s\n", exe.c_str());
  537. }
  538. }
  539. static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
  540. std::vector<ggml_backend_dev_t> devices;
  541. auto dev_names = string_split<std::string>(value, ',');
  542. if (dev_names.empty()) {
  543. throw std::invalid_argument("no devices specified");
  544. }
  545. if (dev_names.size() == 1 && dev_names[0] == "none") {
  546. devices.push_back(nullptr);
  547. } else {
  548. for (const auto & device : dev_names) {
  549. auto * dev = ggml_backend_dev_by_name(device.c_str());
  550. if (!dev || ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
  551. throw std::invalid_argument(string_format("invalid device: %s", device.c_str()));
  552. }
  553. devices.push_back(dev);
  554. }
  555. devices.push_back(nullptr);
  556. }
  557. return devices;
  558. }
  559. static void add_rpc_devices(const std::string & servers) {
  560. auto rpc_servers = string_split<std::string>(servers, ',');
  561. if (rpc_servers.empty()) {
  562. throw std::invalid_argument("no RPC servers specified");
  563. }
  564. ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
  565. if (!rpc_reg) {
  566. throw std::invalid_argument("failed to find RPC backend");
  567. }
  568. typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char * endpoint);
  569. ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server");
  570. if (!ggml_backend_rpc_add_server_fn) {
  571. throw std::invalid_argument("failed to find RPC add server function");
  572. }
  573. for (const auto & server : rpc_servers) {
  574. auto reg = ggml_backend_rpc_add_server_fn(server.c_str());
  575. ggml_backend_register(reg);
  576. }
  577. }
  578. bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
  579. auto ctx_arg = common_params_parser_init(params, ex, print_usage);
  580. const common_params params_org = ctx_arg.params; // the example can modify the default params
  581. try {
  582. if (!common_params_parse_ex(argc, argv, ctx_arg)) {
  583. ctx_arg.params = params_org;
  584. return false;
  585. }
  586. if (ctx_arg.params.usage) {
  587. common_params_print_usage(ctx_arg);
  588. if (ctx_arg.print_usage) {
  589. ctx_arg.print_usage(argc, argv);
  590. }
  591. exit(0);
  592. }
  593. if (ctx_arg.params.completion) {
  594. common_params_print_completion(ctx_arg);
  595. exit(0);
  596. }
  597. params.lr.init();
  598. } catch (const std::invalid_argument & ex) {
  599. fprintf(stderr, "%s\n", ex.what());
  600. ctx_arg.params = params_org;
  601. return false;
  602. } catch (std::exception & ex) {
  603. fprintf(stderr, "%s\n", ex.what());
  604. exit(1); // for other exceptions, we exit with status code 1
  605. }
  606. return true;
  607. }
  608. static std::string list_builtin_chat_templates() {
  609. std::vector<const char *> supported_tmpl;
  610. int32_t res = llama_chat_builtin_templates(nullptr, 0);
  611. supported_tmpl.resize(res);
  612. res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
  613. std::ostringstream msg;
  614. for (auto & tmpl : supported_tmpl) {
  615. msg << tmpl << (&tmpl == &supported_tmpl.back() ? "" : ", ");
  616. }
  617. return msg.str();
  618. }
  619. static bool is_truthy(const std::string & value) {
  620. return value == "on" || value == "enabled" || value == "1";
  621. }
  622. static bool is_falsey(const std::string & value) {
  623. return value == "off" || value == "disabled" || value == "0";
  624. }
  625. static bool is_autoy(const std::string & value) {
  626. return value == "auto" || value == "-1";
  627. }
  628. common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
  629. // default values specific to example
  630. // note: we place it here instead of inside server.cpp to allow llama-gen-docs to pick it up
  631. if (ex == LLAMA_EXAMPLE_SERVER) {
  632. params.use_jinja = true;
  633. }
  634. // load dynamic backends
  635. ggml_backend_load_all();
  636. common_params_context ctx_arg(params);
  637. ctx_arg.print_usage = print_usage;
  638. ctx_arg.ex = ex;
  639. std::string sampler_type_chars;
  640. std::string sampler_type_names;
  641. for (const auto & sampler : params.sampling.samplers) {
  642. sampler_type_chars += common_sampler_type_to_chr(sampler);
  643. sampler_type_names += common_sampler_type_to_str(sampler) + ";";
  644. }
  645. sampler_type_names.pop_back();
  646. /**
  647. * filter options by example
  648. * rules:
  649. * - all examples inherit options from LLAMA_EXAMPLE_COMMON
  650. * - if LLAMA_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example
  651. * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
  652. */
  653. auto add_opt = [&](common_arg arg) {
  654. if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
  655. ctx_arg.options.push_back(std::move(arg));
  656. }
  657. };
  658. add_opt(common_arg(
  659. {"-h", "--help", "--usage"},
  660. "print usage and exit",
  661. [](common_params & params) {
  662. params.usage = true;
  663. }
  664. ));
  665. add_opt(common_arg(
  666. {"--version"},
  667. "show version and build info",
  668. [](common_params &) {
  669. fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
  670. fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
  671. exit(0);
  672. }
  673. ));
  674. add_opt(common_arg(
  675. {"-cl", "--cache-list"},
  676. "show list of models in cache",
  677. [](common_params &) {
  678. printf("model cache directory: %s\n", fs_get_cache_directory().c_str());
  679. auto models = common_list_cached_models();
  680. printf("number of models in cache: %zu\n", models.size());
  681. for (size_t i = 0; i < models.size(); i++) {
  682. auto & model = models[i];
  683. printf("%4d. %s\n", (int) i + 1, model.to_string().c_str());
  684. }
  685. exit(0);
  686. }
  687. ));
  688. add_opt(common_arg(
  689. {"--completion-bash"},
  690. "print source-able bash completion script for llama.cpp",
  691. [](common_params & params) {
  692. params.completion = true;
  693. }
  694. ));
  695. add_opt(common_arg(
  696. {"--verbose-prompt"},
  697. string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),
  698. [](common_params & params) {
  699. params.verbose_prompt = true;
  700. }
  701. ));
  702. add_opt(common_arg(
  703. {"--no-display-prompt"},
  704. string_format("don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false"),
  705. [](common_params & params) {
  706. params.display_prompt = false;
  707. }
  708. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  709. add_opt(common_arg(
  710. {"-co", "--color"},
  711. string_format("colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? "true" : "false"),
  712. [](common_params & params) {
  713. params.use_color = true;
  714. }
  715. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
  716. add_opt(common_arg(
  717. {"-t", "--threads"}, "N",
  718. string_format("number of CPU threads to use during generation (default: %d)", params.cpuparams.n_threads),
  719. [](common_params & params, int value) {
  720. params.cpuparams.n_threads = value;
  721. if (params.cpuparams.n_threads <= 0) {
  722. params.cpuparams.n_threads = std::thread::hardware_concurrency();
  723. }
  724. }
  725. ).set_env("LLAMA_ARG_THREADS"));
  726. add_opt(common_arg(
  727. {"-tb", "--threads-batch"}, "N",
  728. "number of threads to use during batch and prompt processing (default: same as --threads)",
  729. [](common_params & params, int value) {
  730. params.cpuparams_batch.n_threads = value;
  731. if (params.cpuparams_batch.n_threads <= 0) {
  732. params.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
  733. }
  734. }
  735. ));
  736. add_opt(common_arg(
  737. {"-C", "--cpu-mask"}, "M",
  738. "CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: \"\")",
  739. [](common_params & params, const std::string & mask) {
  740. params.cpuparams.mask_valid = true;
  741. if (!parse_cpu_mask(mask, params.cpuparams.cpumask)) {
  742. throw std::invalid_argument("invalid cpumask");
  743. }
  744. }
  745. ));
  746. add_opt(common_arg(
  747. {"-Cr", "--cpu-range"}, "lo-hi",
  748. "range of CPUs for affinity. Complements --cpu-mask",
  749. [](common_params & params, const std::string & range) {
  750. params.cpuparams.mask_valid = true;
  751. if (!parse_cpu_range(range, params.cpuparams.cpumask)) {
  752. throw std::invalid_argument("invalid range");
  753. }
  754. }
  755. ));
  756. add_opt(common_arg(
  757. {"--cpu-strict"}, "<0|1>",
  758. string_format("use strict CPU placement (default: %u)\n", (unsigned) params.cpuparams.strict_cpu),
  759. [](common_params & params, const std::string & value) {
  760. params.cpuparams.strict_cpu = std::stoul(value);
  761. }
  762. ));
  763. add_opt(common_arg(
  764. {"--prio"}, "N",
  765. string_format("set process/thread priority : low(-1), normal(0), medium(1), high(2), realtime(3) (default: %d)\n", params.cpuparams.priority),
  766. [](common_params & params, int prio) {
  767. if (prio < GGML_SCHED_PRIO_LOW || prio > GGML_SCHED_PRIO_REALTIME) {
  768. throw std::invalid_argument("invalid value");
  769. }
  770. params.cpuparams.priority = (enum ggml_sched_priority) prio;
  771. }
  772. ));
  773. add_opt(common_arg(
  774. {"--poll"}, "<0...100>",
  775. string_format("use polling level to wait for work (0 - no polling, default: %u)\n", (unsigned) params.cpuparams.poll),
  776. [](common_params & params, const std::string & value) {
  777. params.cpuparams.poll = std::stoul(value);
  778. }
  779. ));
  780. add_opt(common_arg(
  781. {"-Cb", "--cpu-mask-batch"}, "M",
  782. "CPU affinity mask: arbitrarily long hex. Complements cpu-range-batch (default: same as --cpu-mask)",
  783. [](common_params & params, const std::string & mask) {
  784. params.cpuparams_batch.mask_valid = true;
  785. if (!parse_cpu_mask(mask, params.cpuparams_batch.cpumask)) {
  786. throw std::invalid_argument("invalid cpumask");
  787. }
  788. }
  789. ));
  790. add_opt(common_arg(
  791. {"-Crb", "--cpu-range-batch"}, "lo-hi",
  792. "ranges of CPUs for affinity. Complements --cpu-mask-batch",
  793. [](common_params & params, const std::string & range) {
  794. params.cpuparams_batch.mask_valid = true;
  795. if (!parse_cpu_range(range, params.cpuparams_batch.cpumask)) {
  796. throw std::invalid_argument("invalid range");
  797. }
  798. }
  799. ));
  800. add_opt(common_arg(
  801. {"--cpu-strict-batch"}, "<0|1>",
  802. "use strict CPU placement (default: same as --cpu-strict)",
  803. [](common_params & params, int value) {
  804. params.cpuparams_batch.strict_cpu = value;
  805. }
  806. ));
  807. add_opt(common_arg(
  808. {"--prio-batch"}, "N",
  809. string_format("set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams_batch.priority),
  810. [](common_params & params, int prio) {
  811. if (prio < 0 || prio > 3) {
  812. throw std::invalid_argument("invalid value");
  813. }
  814. params.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
  815. }
  816. ));
  817. add_opt(common_arg(
  818. {"--poll-batch"}, "<0|1>",
  819. "use polling to wait for work (default: same as --poll)",
  820. [](common_params & params, int value) {
  821. params.cpuparams_batch.poll = value;
  822. }
  823. ));
  824. add_opt(common_arg(
  825. {"-lcs", "--lookup-cache-static"}, "FNAME",
  826. "path to static lookup cache to use for lookup decoding (not updated by generation)",
  827. [](common_params & params, const std::string & value) {
  828. params.lookup_cache_static = value;
  829. }
  830. ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
  831. add_opt(common_arg(
  832. {"-lcd", "--lookup-cache-dynamic"}, "FNAME",
  833. "path to dynamic lookup cache to use for lookup decoding (updated by generation)",
  834. [](common_params & params, const std::string & value) {
  835. params.lookup_cache_dynamic = value;
  836. }
  837. ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
  838. add_opt(common_arg(
  839. {"-c", "--ctx-size"}, "N",
  840. string_format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx),
  841. [](common_params & params, int value) {
  842. params.n_ctx = value;
  843. }
  844. ).set_env("LLAMA_ARG_CTX_SIZE"));
  845. add_opt(common_arg(
  846. {"-n", "--predict", "--n-predict"}, "N",
  847. string_format(
  848. ex == LLAMA_EXAMPLE_MAIN
  849. ? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
  850. : "number of tokens to predict (default: %d, -1 = infinity)",
  851. params.n_predict),
  852. [](common_params & params, int value) {
  853. params.n_predict = value;
  854. }
  855. ).set_env("LLAMA_ARG_N_PREDICT"));
  856. add_opt(common_arg(
  857. {"-b", "--batch-size"}, "N",
  858. string_format("logical maximum batch size (default: %d)", params.n_batch),
  859. [](common_params & params, int value) {
  860. params.n_batch = value;
  861. }
  862. ).set_env("LLAMA_ARG_BATCH"));
  863. add_opt(common_arg(
  864. {"-ub", "--ubatch-size"}, "N",
  865. string_format("physical maximum batch size (default: %d)", params.n_ubatch),
  866. [](common_params & params, int value) {
  867. params.n_ubatch = value;
  868. }
  869. ).set_env("LLAMA_ARG_UBATCH"));
  870. add_opt(common_arg(
  871. {"--keep"}, "N",
  872. string_format("number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep),
  873. [](common_params & params, int value) {
  874. params.n_keep = value;
  875. }
  876. ));
  877. add_opt(common_arg(
  878. {"--swa-full"},
  879. string_format("use full-size SWA cache (default: %s)\n"
  880. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)", params.swa_full ? "true" : "false"),
  881. [](common_params & params) {
  882. params.swa_full = true;
  883. }
  884. ).set_env("LLAMA_ARG_SWA_FULL"));
  885. add_opt(common_arg(
  886. {"--ctx-checkpoints", "--swa-checkpoints"}, "N",
  887. string_format("max number of context checkpoints to create per slot (default: %d)\n"
  888. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/15293)", params.n_ctx_checkpoints),
  889. [](common_params & params, int value) {
  890. params.n_ctx_checkpoints = value;
  891. }
  892. ).set_env("LLAMA_ARG_CTX_CHECKPOINTS").set_examples({LLAMA_EXAMPLE_SERVER}));
  893. add_opt(common_arg(
  894. {"--cache-ram", "-cram"}, "N",
  895. string_format("set the maximum cache size in MiB (default: %d, -1 - no limit, 0 - disable)\n"
  896. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/16391)", params.cache_ram_mib),
  897. [](common_params & params, int value) {
  898. params.cache_ram_mib = value;
  899. }
  900. ).set_env("LLAMA_ARG_CACHE_RAM").set_examples({LLAMA_EXAMPLE_SERVER}));
  901. add_opt(common_arg(
  902. {"--kv-unified", "-kvu"},
  903. string_format("use single unified KV buffer for the KV cache of all sequences (default: %s)\n"
  904. "[(more info)](https://github.com/ggml-org/llama.cpp/pull/14363)", params.kv_unified ? "true" : "false"),
  905. [](common_params & params) {
  906. params.kv_unified = true;
  907. }
  908. ).set_env("LLAMA_ARG_KV_UNIFIED"));
  909. add_opt(common_arg(
  910. {"--no-context-shift"},
  911. string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
  912. [](common_params & params) {
  913. params.ctx_shift = false;
  914. }
  915. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
  916. add_opt(common_arg(
  917. {"--context-shift"},
  918. string_format("enables context shift on infinite text generation (default: %s)", params.ctx_shift ? "enabled" : "disabled"),
  919. [](common_params & params) {
  920. params.ctx_shift = true;
  921. }
  922. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_CONTEXT_SHIFT"));
  923. add_opt(common_arg(
  924. {"--chunks"}, "N",
  925. string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks),
  926. [](common_params & params, int value) {
  927. params.n_chunks = value;
  928. }
  929. ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
  930. add_opt(common_arg({ "-fa", "--flash-attn" }, "[on|off|auto]",
  931. string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')",
  932. llama_flash_attn_type_name(params.flash_attn_type)),
  933. [](common_params & params, const std::string & value) {
  934. if (is_truthy(value)) {
  935. params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
  936. } else if (is_falsey(value)) {
  937. params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
  938. } else if (is_autoy(value)) {
  939. params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
  940. } else {
  941. throw std::runtime_error(
  942. string_format("error: unkown value for --flash-attn: '%s'\n", value.c_str()));
  943. }
  944. }).set_env("LLAMA_ARG_FLASH_ATTN"));
  945. add_opt(common_arg(
  946. {"-p", "--prompt"}, "PROMPT",
  947. "prompt to start generation with; for system message, use -sys",
  948. [](common_params & params, const std::string & value) {
  949. params.prompt = value;
  950. }
  951. ).set_excludes({LLAMA_EXAMPLE_SERVER}));
  952. add_opt(common_arg(
  953. {"-sys", "--system-prompt"}, "PROMPT",
  954. "system prompt to use with model (if applicable, depending on chat template)",
  955. [](common_params & params, const std::string & value) {
  956. params.system_prompt = value;
  957. }
  958. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_DIFFUSION}));
  959. add_opt(common_arg(
  960. {"--no-perf"},
  961. string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
  962. [](common_params & params) {
  963. params.no_perf = true;
  964. params.sampling.no_perf = true;
  965. }
  966. ).set_env("LLAMA_ARG_NO_PERF"));
  967. add_opt(common_arg(
  968. {"-f", "--file"}, "FNAME",
  969. "a file containing the prompt (default: none)",
  970. [](common_params & params, const std::string & value) {
  971. params.prompt = read_file(value);
  972. // store the external file name in params
  973. params.prompt_file = value;
  974. if (!params.prompt.empty() && params.prompt.back() == '\n') {
  975. params.prompt.pop_back();
  976. }
  977. }
  978. ).set_excludes({LLAMA_EXAMPLE_SERVER}));
  979. add_opt(common_arg(
  980. {"-sysf", "--system-prompt-file"}, "FNAME",
  981. "a file containing the system prompt (default: none)",
  982. [](common_params & params, const std::string & value) {
  983. params.system_prompt = read_file(value);
  984. if (!params.system_prompt.empty() && params.system_prompt.back() == '\n') {
  985. params.system_prompt.pop_back();
  986. }
  987. }
  988. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_DIFFUSION}));
  989. add_opt(common_arg(
  990. {"--in-file"}, "FNAME",
  991. "an input file (repeat to specify multiple files)",
  992. [](common_params & params, const std::string & value) {
  993. std::ifstream file(value);
  994. if (!file) {
  995. throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
  996. }
  997. params.in_files.push_back(value);
  998. }
  999. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  1000. add_opt(common_arg(
  1001. {"-bf", "--binary-file"}, "FNAME",
  1002. "binary file containing the prompt (default: none)",
  1003. [](common_params & params, const std::string & value) {
  1004. std::ifstream file(value, std::ios::binary);
  1005. if (!file) {
  1006. throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
  1007. }
  1008. // store the external file name in params
  1009. params.prompt_file = value;
  1010. std::ostringstream ss;
  1011. ss << file.rdbuf();
  1012. params.prompt = ss.str();
  1013. fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
  1014. }
  1015. ).set_excludes({LLAMA_EXAMPLE_SERVER}));
  1016. add_opt(common_arg(
  1017. {"-e", "--escape"},
  1018. string_format("process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
  1019. [](common_params & params) {
  1020. params.escape = true;
  1021. }
  1022. ));
  1023. add_opt(common_arg(
  1024. {"--no-escape"},
  1025. "do not process escape sequences",
  1026. [](common_params & params) {
  1027. params.escape = false;
  1028. }
  1029. ));
  1030. add_opt(common_arg(
  1031. {"-ptc", "--print-token-count"}, "N",
  1032. string_format("print token count every N tokens (default: %d)", params.n_print),
  1033. [](common_params & params, int value) {
  1034. params.n_print = value;
  1035. }
  1036. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1037. add_opt(common_arg(
  1038. {"--prompt-cache"}, "FNAME",
  1039. "file to cache prompt state for faster startup (default: none)",
  1040. [](common_params & params, const std::string & value) {
  1041. params.path_prompt_cache = value;
  1042. }
  1043. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1044. add_opt(common_arg(
  1045. {"--prompt-cache-all"},
  1046. "if specified, saves user input and generations to cache as well\n",
  1047. [](common_params & params) {
  1048. params.prompt_cache_all = true;
  1049. }
  1050. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1051. add_opt(common_arg(
  1052. {"--prompt-cache-ro"},
  1053. "if specified, uses the prompt cache but does not update it",
  1054. [](common_params & params) {
  1055. params.prompt_cache_ro = true;
  1056. }
  1057. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1058. add_opt(common_arg(
  1059. {"-r", "--reverse-prompt"}, "PROMPT",
  1060. "halt generation at PROMPT, return control in interactive mode\n",
  1061. [](common_params & params, const std::string & value) {
  1062. params.antiprompt.emplace_back(value);
  1063. }
  1064. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
  1065. add_opt(common_arg(
  1066. {"-sp", "--special"},
  1067. string_format("special tokens output enabled (default: %s)", params.special ? "true" : "false"),
  1068. [](common_params & params) {
  1069. params.special = true;
  1070. }
  1071. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
  1072. add_opt(common_arg(
  1073. {"-cnv", "--conversation"},
  1074. "run in conversation mode:\n"
  1075. "- does not print special tokens and suffix/prefix\n"
  1076. "- interactive mode is also enabled\n"
  1077. "(default: auto enabled if chat template is available)",
  1078. [](common_params & params) {
  1079. params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
  1080. }
  1081. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1082. add_opt(common_arg(
  1083. {"-no-cnv", "--no-conversation"},
  1084. "force disable conversation mode (default: false)",
  1085. [](common_params & params) {
  1086. params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
  1087. }
  1088. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1089. add_opt(common_arg(
  1090. {"-st", "--single-turn"},
  1091. "run conversation for a single turn only, then exit when done\n"
  1092. "will not be interactive if first turn is predefined with --prompt\n"
  1093. "(default: false)",
  1094. [](common_params & params) {
  1095. params.single_turn = true;
  1096. }
  1097. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1098. add_opt(common_arg(
  1099. {"-i", "--interactive"},
  1100. string_format("run in interactive mode (default: %s)", params.interactive ? "true" : "false"),
  1101. [](common_params & params) {
  1102. params.interactive = true;
  1103. }
  1104. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1105. add_opt(common_arg(
  1106. {"-if", "--interactive-first"},
  1107. string_format("run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false"),
  1108. [](common_params & params) {
  1109. params.interactive_first = true;
  1110. }
  1111. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1112. add_opt(common_arg(
  1113. {"-mli", "--multiline-input"},
  1114. "allows you to write or paste multiple lines without ending each in '\\'",
  1115. [](common_params & params) {
  1116. params.multiline_input = true;
  1117. }
  1118. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1119. add_opt(common_arg(
  1120. {"--in-prefix-bos"},
  1121. "prefix BOS to user inputs, preceding the `--in-prefix` string",
  1122. [](common_params & params) {
  1123. params.input_prefix_bos = true;
  1124. params.enable_chat_template = false;
  1125. }
  1126. ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1127. add_opt(common_arg(
  1128. {"--in-prefix"}, "STRING",
  1129. "string to prefix user inputs with (default: empty)",
  1130. [](common_params & params, const std::string & value) {
  1131. params.input_prefix = value;
  1132. params.enable_chat_template = false;
  1133. }
  1134. ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-suffix"}, "STRING",
        "string to append after user inputs (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_suffix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
  1143. add_opt(common_arg(
  1144. {"--no-warmup"},
  1145. "skip warming up the model with an empty run",
  1146. [](common_params & params) {
  1147. params.warmup = false;
  1148. }
  1149. ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD, LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_PERPLEXITY}));
  1150. add_opt(common_arg(
  1151. {"--spm-infill"},
  1152. string_format(
  1153. "use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: %s)",
  1154. params.spm_infill ? "enabled" : "disabled"
  1155. ),
  1156. [](common_params & params) {
  1157. params.spm_infill = true;
  1158. }
  1159. ).set_examples({LLAMA_EXAMPLE_SERVER}));
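    // The COMMON_PARAMS_SAMPLING_CONFIG_* bits set below record which sampling parameters
    // were given explicitly on the command line, so later code can tell user-provided
    // values apart from the built-in defaults.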
  1160. add_opt(common_arg(
  1161. {"--samplers"}, "SAMPLERS",
  1162. string_format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
  1163. [](common_params & params, const std::string & value) {
  1164. const auto sampler_names = string_split<std::string>(value, ';');
  1165. params.sampling.samplers = common_sampler_types_from_names(sampler_names, true);
  1166. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_SAMPLERS;
  1167. }
  1168. ).set_sparam());
  1169. add_opt(common_arg(
  1170. {"-s", "--seed"}, "SEED",
  1171. string_format("RNG seed (default: %d, use random seed for %d)", params.sampling.seed, LLAMA_DEFAULT_SEED),
  1172. [](common_params & params, const std::string & value) {
  1173. params.sampling.seed = std::stoul(value);
  1174. }
  1175. ).set_sparam());
  1176. add_opt(common_arg(
  1177. {"--sampling-seq", "--sampler-seq"}, "SEQUENCE",
  1178. string_format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()),
  1179. [](common_params & params, const std::string & value) {
  1180. params.sampling.samplers = common_sampler_types_from_chars(value);
  1181. }
  1182. ).set_sparam());
  1183. add_opt(common_arg(
  1184. {"--ignore-eos"},
  1185. "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)",
  1186. [](common_params & params) {
  1187. params.sampling.ignore_eos = true;
  1188. }
  1189. ).set_sparam());
  1190. add_opt(common_arg(
  1191. {"--temp"}, "N",
  1192. string_format("temperature (default: %.1f)", (double)params.sampling.temp),
  1193. [](common_params & params, const std::string & value) {
  1194. params.sampling.temp = std::stof(value);
  1195. params.sampling.temp = std::max(params.sampling.temp, 0.0f);
  1196. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TEMP;
  1197. }
  1198. ).set_sparam());
  1199. add_opt(common_arg(
  1200. {"--top-k"}, "N",
  1201. string_format("top-k sampling (default: %d, 0 = disabled)", params.sampling.top_k),
  1202. [](common_params & params, int value) {
  1203. params.sampling.top_k = value;
  1204. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TOP_K;
  1205. }
  1206. ).set_sparam());
  1207. add_opt(common_arg(
  1208. {"--top-p"}, "N",
  1209. string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sampling.top_p),
  1210. [](common_params & params, const std::string & value) {
  1211. params.sampling.top_p = std::stof(value);
  1212. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TOP_P;
  1213. }
  1214. ).set_sparam());
  1215. add_opt(common_arg(
  1216. {"--min-p"}, "N",
  1217. string_format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sampling.min_p),
  1218. [](common_params & params, const std::string & value) {
  1219. params.sampling.min_p = std::stof(value);
  1220. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIN_P;
  1221. }
  1222. ).set_sparam());
  1223. add_opt(common_arg(
  1224. {"--top-nsigma"}, "N",
  1225. string_format("top-n-sigma sampling (default: %.1f, -1.0 = disabled)", params.sampling.top_n_sigma),
  1226. [](common_params & params, const std::string & value) {
  1227. params.sampling.top_n_sigma = std::stof(value);
  1228. }
  1229. ).set_sparam());
  1230. add_opt(common_arg(
  1231. {"--xtc-probability"}, "N",
  1232. string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sampling.xtc_probability),
  1233. [](common_params & params, const std::string & value) {
  1234. params.sampling.xtc_probability = std::stof(value);
  1235. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_XTC_PROBABILITY;
  1236. }
  1237. ).set_sparam());
  1238. add_opt(common_arg(
  1239. {"--xtc-threshold"}, "N",
  1240. string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sampling.xtc_threshold),
  1241. [](common_params & params, const std::string & value) {
  1242. params.sampling.xtc_threshold = std::stof(value);
  1243. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_XTC_THRESHOLD;
  1244. }
  1245. ).set_sparam());
  1246. add_opt(common_arg(
  1247. {"--typical"}, "N",
  1248. string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sampling.typ_p),
  1249. [](common_params & params, const std::string & value) {
  1250. params.sampling.typ_p = std::stof(value);
  1251. }
  1252. ).set_sparam());
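    // --repeat-last-n also grows n_prev so the sampler history covers at least the penalty window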
  1253. add_opt(common_arg(
  1254. {"--repeat-last-n"}, "N",
  1255. string_format("last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)", params.sampling.penalty_last_n),
  1256. [](common_params & params, int value) {
  1257. if (value < -1) {
  1258. throw std::runtime_error(string_format("error: invalid repeat-last-n = %d\n", value));
  1259. }
  1260. params.sampling.penalty_last_n = value;
  1261. params.sampling.n_prev = std::max(params.sampling.n_prev, params.sampling.penalty_last_n);
  1262. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_LAST_N;
  1263. }
  1264. ).set_sparam());
  1265. add_opt(common_arg(
  1266. {"--repeat-penalty"}, "N",
  1267. string_format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sampling.penalty_repeat),
  1268. [](common_params & params, const std::string & value) {
  1269. params.sampling.penalty_repeat = std::stof(value);
  1270. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_REPEAT;
  1271. }
  1272. ).set_sparam());
  1273. add_opt(common_arg(
  1274. {"--presence-penalty"}, "N",
  1275. string_format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_present),
  1276. [](common_params & params, const std::string & value) {
  1277. params.sampling.penalty_present = std::stof(value);
  1278. }
  1279. ).set_sparam());
  1280. add_opt(common_arg(
  1281. {"--frequency-penalty"}, "N",
  1282. string_format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_freq),
  1283. [](common_params & params, const std::string & value) {
  1284. params.sampling.penalty_freq = std::stof(value);
  1285. }
  1286. ).set_sparam());
  1287. add_opt(common_arg(
  1288. {"--dry-multiplier"}, "N",
  1289. string_format("set DRY sampling multiplier (default: %.1f, 0.0 = disabled)", (double)params.sampling.dry_multiplier),
  1290. [](common_params & params, const std::string & value) {
  1291. params.sampling.dry_multiplier = std::stof(value);
  1292. }
  1293. ).set_sparam());
    add_opt(common_arg(
        {"--dry-base"}, "N",
        string_format("set DRY sampling base value (default: %.2f)", (double)params.sampling.dry_base),
        [](common_params & params, const std::string & value) {
            float potential_base = std::stof(value);
            if (potential_base >= 1.0f) {
                params.sampling.dry_base = potential_base;
            }
        }
    ).set_sparam());
  1305. add_opt(common_arg(
  1306. {"--dry-allowed-length"}, "N",
  1307. string_format("set allowed length for DRY sampling (default: %d)", params.sampling.dry_allowed_length),
  1308. [](common_params & params, int value) {
  1309. params.sampling.dry_allowed_length = value;
  1310. }
  1311. ).set_sparam());
  1312. add_opt(common_arg(
  1313. {"--dry-penalty-last-n"}, "N",
  1314. string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sampling.dry_penalty_last_n),
  1315. [](common_params & params, int value) {
  1316. if (value < -1) {
  1317. throw std::runtime_error(string_format("error: invalid dry-penalty-last-n = %d\n", value));
  1318. }
  1319. params.sampling.dry_penalty_last_n = value;
  1320. }
  1321. ).set_sparam());
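    // --dry-sequence-breaker can be repeated; the first user-supplied value clears the built-in
    // default breakers, and the special value "none" disables sequence breakers entirely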
    add_opt(common_arg(
        {"--dry-sequence-breaker"}, "STRING",
        string_format("add sequence breaker for DRY sampling, clearing out default breakers (%s) in the process; use \"none\" to not use any sequence breakers\n",
            params.sampling.dry_sequence_breakers.empty() ? "none" :
            std::accumulate(std::next(params.sampling.dry_sequence_breakers.begin()),
                params.sampling.dry_sequence_breakers.end(),
                std::string("'") + (params.sampling.dry_sequence_breakers[0] == "\n" ? "\\n" : params.sampling.dry_sequence_breakers[0]) + "'",
                [](const std::string & a, const std::string & b) {
                    std::string formatted_b = (b == "\n") ? "\\n" : b;
                    return a + ", '" + formatted_b + "'";
                }).c_str()),
        [](common_params & params, const std::string & value) {
            static bool defaults_cleared = false;
            if (!defaults_cleared) {
                params.sampling.dry_sequence_breakers.clear();
                defaults_cleared = true;
            }
            if (value == "none") {
                params.sampling.dry_sequence_breakers.clear();
            } else {
                params.sampling.dry_sequence_breakers.emplace_back(value);
            }
        }
    ).set_sparam());
  1346. add_opt(common_arg(
  1347. {"--dynatemp-range"}, "N",
  1348. string_format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sampling.dynatemp_range),
  1349. [](common_params & params, const std::string & value) {
  1350. params.sampling.dynatemp_range = std::stof(value);
  1351. }
  1352. ).set_sparam());
  1353. add_opt(common_arg(
  1354. {"--dynatemp-exp"}, "N",
  1355. string_format("dynamic temperature exponent (default: %.1f)", (double)params.sampling.dynatemp_exponent),
  1356. [](common_params & params, const std::string & value) {
  1357. params.sampling.dynatemp_exponent = std::stof(value);
  1358. }
  1359. ).set_sparam());
  1360. add_opt(common_arg(
  1361. {"--mirostat"}, "N",
  1362. string_format("use Mirostat sampling.\nTop K, Nucleus and Locally Typical samplers are ignored if used.\n"
  1363. "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sampling.mirostat),
  1364. [](common_params & params, int value) {
  1365. params.sampling.mirostat = value;
  1366. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT;
  1367. }
  1368. ).set_sparam());
  1369. add_opt(common_arg(
  1370. {"--mirostat-lr"}, "N",
  1371. string_format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sampling.mirostat_eta),
  1372. [](common_params & params, const std::string & value) {
  1373. params.sampling.mirostat_eta = std::stof(value);
  1374. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_ETA;
  1375. }
  1376. ).set_sparam());
  1377. add_opt(common_arg(
  1378. {"--mirostat-ent"}, "N",
  1379. string_format("Mirostat target entropy, parameter tau (default: %.1f)", (double)params.sampling.mirostat_tau),
  1380. [](common_params & params, const std::string & value) {
  1381. params.sampling.mirostat_tau = std::stof(value);
  1382. params.sampling.user_sampling_config |= common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_TAU;
  1383. }
  1384. ).set_sparam());
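    // --logit-bias values use the form "<token_id>(+|-)<bias>"; the sign selects whether the bias
    // raises or lowers the token's logit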
    add_opt(common_arg(
        {"-l", "--logit-bias"}, "TOKEN_ID(+/-)BIAS",
        "modifies the likelihood of token appearing in the completion,\n"
        "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
        "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'",
        [](common_params & params, const std::string & value) {
            std::stringstream ss(value);
            llama_token key;
            char sign;
            std::string value_str;
            try {
                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
                    const float bias = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
                    params.sampling.logit_bias.push_back({key, bias});
                } else {
                    throw std::invalid_argument("invalid input format");
                }
            } catch (const std::exception &) {
                throw std::invalid_argument("invalid input format");
            }
        }
    ).set_sparam());
  1407. add_opt(common_arg(
  1408. {"--grammar"}, "GRAMMAR",
  1409. string_format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sampling.grammar.c_str()),
  1410. [](common_params & params, const std::string & value) {
  1411. params.sampling.grammar = value;
  1412. }
  1413. ).set_sparam());
  1414. add_opt(common_arg(
  1415. {"--grammar-file"}, "FNAME",
  1416. "file to read grammar from",
  1417. [](common_params & params, const std::string & value) {
  1418. params.sampling.grammar = read_file(value);
  1419. }
  1420. ).set_sparam());
  1421. add_opt(common_arg(
  1422. {"-j", "--json-schema"}, "SCHEMA",
  1423. "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead",
  1424. [](common_params & params, const std::string & value) {
  1425. params.sampling.grammar = json_schema_to_grammar(json::parse(value));
  1426. }
  1427. ).set_sparam());
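    // -jf is the file-based counterpart of -j: the schema is read from disk and converted to a grammar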
    add_opt(common_arg(
        {"-jf", "--json-schema-file"}, "FILE",
        "File containing a JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string schema;
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(schema)
            );
            params.sampling.grammar = json_schema_to_grammar(json::parse(schema));
        }
    ).set_sparam());
  1445. add_opt(common_arg(
  1446. {"--pooling"}, "{none,mean,cls,last,rank}",
  1447. "pooling type for embeddings, use model default if unspecified",
  1448. [](common_params & params, const std::string & value) {
  1449. /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
  1450. else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
  1451. else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
  1452. else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; }
  1453. else if (value == "rank") { params.pooling_type = LLAMA_POOLING_TYPE_RANK; }
  1454. else { throw std::invalid_argument("invalid value"); }
  1455. }
  1456. ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_POOLING"));
  1457. add_opt(common_arg(
  1458. {"--attention"}, "{causal,non-causal}",
  1459. "attention type for embeddings, use model default if unspecified",
  1460. [](common_params & params, const std::string & value) {
  1461. /**/ if (value == "causal") { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
  1462. else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
  1463. else { throw std::invalid_argument("invalid value"); }
  1464. }
  1465. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
  1466. add_opt(common_arg(
  1467. {"--rope-scaling"}, "{none,linear,yarn}",
  1468. "RoPE frequency scaling method, defaults to linear unless specified by the model",
  1469. [](common_params & params, const std::string & value) {
  1470. /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
  1471. else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
  1472. else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
  1473. else { throw std::invalid_argument("invalid value"); }
  1474. }
  1475. ).set_env("LLAMA_ARG_ROPE_SCALING_TYPE"));
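    // --rope-scale N is stored as rope_freq_scale = 1/N, i.e. the inverse of --rope-freq-scale below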
  1476. add_opt(common_arg(
  1477. {"--rope-scale"}, "N",
  1478. "RoPE context scaling factor, expands context by a factor of N",
  1479. [](common_params & params, const std::string & value) {
  1480. params.rope_freq_scale = 1.0f / std::stof(value);
  1481. }
  1482. ).set_env("LLAMA_ARG_ROPE_SCALE"));
  1483. add_opt(common_arg(
  1484. {"--rope-freq-base"}, "N",
  1485. "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)",
  1486. [](common_params & params, const std::string & value) {
  1487. params.rope_freq_base = std::stof(value);
  1488. }
  1489. ).set_env("LLAMA_ARG_ROPE_FREQ_BASE"));
  1490. add_opt(common_arg(
  1491. {"--rope-freq-scale"}, "N",
  1492. "RoPE frequency scaling factor, expands context by a factor of 1/N",
  1493. [](common_params & params, const std::string & value) {
  1494. params.rope_freq_scale = std::stof(value);
  1495. }
  1496. ).set_env("LLAMA_ARG_ROPE_FREQ_SCALE"));
  1497. add_opt(common_arg(
  1498. {"--yarn-orig-ctx"}, "N",
  1499. string_format("YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx),
  1500. [](common_params & params, int value) {
  1501. params.yarn_orig_ctx = value;
  1502. }
  1503. ).set_env("LLAMA_ARG_YARN_ORIG_CTX"));
  1504. add_opt(common_arg(
  1505. {"--yarn-ext-factor"}, "N",
  1506. string_format("YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor),
  1507. [](common_params & params, const std::string & value) {
  1508. params.yarn_ext_factor = std::stof(value);
  1509. }
  1510. ).set_env("LLAMA_ARG_YARN_EXT_FACTOR"));
  1511. add_opt(common_arg(
  1512. {"--yarn-attn-factor"}, "N",
  1513. string_format("YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor),
  1514. [](common_params & params, const std::string & value) {
  1515. params.yarn_attn_factor = std::stof(value);
  1516. }
  1517. ).set_env("LLAMA_ARG_YARN_ATTN_FACTOR"));
  1518. add_opt(common_arg(
  1519. {"--yarn-beta-slow"}, "N",
  1520. string_format("YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow),
  1521. [](common_params & params, const std::string & value) {
  1522. params.yarn_beta_slow = std::stof(value);
  1523. }
  1524. ).set_env("LLAMA_ARG_YARN_BETA_SLOW"));
  1525. add_opt(common_arg(
  1526. {"--yarn-beta-fast"}, "N",
  1527. string_format("YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast),
  1528. [](common_params & params, const std::string & value) {
  1529. params.yarn_beta_fast = std::stof(value);
  1530. }
  1531. ).set_env("LLAMA_ARG_YARN_BETA_FAST"));
  1532. add_opt(common_arg(
  1533. {"-gan", "--grp-attn-n"}, "N",
  1534. string_format("group-attention factor (default: %d)", params.grp_attn_n),
  1535. [](common_params & params, int value) {
  1536. params.grp_attn_n = value;
  1537. }
  1538. ).set_env("LLAMA_ARG_GRP_ATTN_N").set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_PASSKEY}));
  1539. add_opt(common_arg(
  1540. {"-gaw", "--grp-attn-w"}, "N",
  1541. string_format("group-attention width (default: %d)", params.grp_attn_w),
  1542. [](common_params & params, int value) {
  1543. params.grp_attn_w = value;
  1544. }
  1545. ).set_env("LLAMA_ARG_GRP_ATTN_W").set_examples({LLAMA_EXAMPLE_MAIN}));
  1546. add_opt(common_arg(
  1547. {"-nkvo", "--no-kv-offload"},
  1548. "disable KV offload",
  1549. [](common_params & params) {
  1550. params.no_kv_offload = true;
  1551. }
  1552. ).set_env("LLAMA_ARG_NO_KV_OFFLOAD"));
  1553. add_opt(common_arg(
  1554. {"-nr", "--no-repack"},
  1555. "disable weight repacking",
  1556. [](common_params & params) {
  1557. params.no_extra_bufts = true;
  1558. }
  1559. ).set_env("LLAMA_ARG_NO_REPACK"));
  1560. add_opt(common_arg(
  1561. {"--no-host"},
  1562. "bypass host buffer allowing extra buffers to be used",
  1563. [](common_params & params) {
  1564. params.no_host = true;
  1565. }
  1566. ).set_env("LLAMA_ARG_NO_HOST"));
  1567. add_opt(common_arg(
  1568. {"-ctk", "--cache-type-k"}, "TYPE",
  1569. string_format(
  1570. "KV cache data type for K\n"
  1571. "allowed values: %s\n"
  1572. "(default: %s)",
  1573. get_all_kv_cache_types().c_str(),
  1574. ggml_type_name(params.cache_type_k)
  1575. ),
  1576. [](common_params & params, const std::string & value) {
  1577. params.cache_type_k = kv_cache_type_from_str(value);
  1578. }
  1579. ).set_env("LLAMA_ARG_CACHE_TYPE_K"));
  1580. add_opt(common_arg(
  1581. {"-ctv", "--cache-type-v"}, "TYPE",
  1582. string_format(
  1583. "KV cache data type for V\n"
  1584. "allowed values: %s\n"
  1585. "(default: %s)",
  1586. get_all_kv_cache_types().c_str(),
  1587. ggml_type_name(params.cache_type_v)
  1588. ),
  1589. [](common_params & params, const std::string & value) {
  1590. params.cache_type_v = kv_cache_type_from_str(value);
  1591. }
  1592. ).set_env("LLAMA_ARG_CACHE_TYPE_V"));
  1593. add_opt(common_arg(
  1594. {"--hellaswag"},
  1595. "compute HellaSwag score over random tasks from datafile supplied with -f",
  1596. [](common_params & params) {
  1597. params.hellaswag = true;
  1598. }
  1599. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1600. add_opt(common_arg(
  1601. {"--hellaswag-tasks"}, "N",
  1602. string_format("number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks),
  1603. [](common_params & params, int value) {
  1604. params.hellaswag_tasks = value;
  1605. }
  1606. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1607. add_opt(common_arg(
  1608. {"--winogrande"},
  1609. "compute Winogrande score over random tasks from datafile supplied with -f",
  1610. [](common_params & params) {
  1611. params.winogrande = true;
  1612. }
  1613. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1614. add_opt(common_arg(
  1615. {"--winogrande-tasks"}, "N",
  1616. string_format("number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks),
  1617. [](common_params & params, int value) {
  1618. params.winogrande_tasks = value;
  1619. }
  1620. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1621. add_opt(common_arg(
  1622. {"--multiple-choice"},
  1623. "compute multiple choice score over random tasks from datafile supplied with -f",
  1624. [](common_params & params) {
  1625. params.multiple_choice = true;
  1626. }
  1627. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1628. add_opt(common_arg(
  1629. {"--multiple-choice-tasks"}, "N",
  1630. string_format("number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks),
  1631. [](common_params & params, int value) {
  1632. params.multiple_choice_tasks = value;
  1633. }
  1634. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1635. add_opt(common_arg(
  1636. {"--kl-divergence"},
  1637. "computes KL-divergence to logits provided via --kl-divergence-base",
  1638. [](common_params & params) {
  1639. params.kl_divergence = true;
  1640. }
  1641. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1642. add_opt(common_arg(
  1643. {"--save-all-logits", "--kl-divergence-base"}, "FNAME",
  1644. "set logits file",
  1645. [](common_params & params, const std::string & value) {
  1646. params.logits_file = value;
  1647. }
  1648. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1649. add_opt(common_arg(
  1650. {"--ppl-stride"}, "N",
  1651. string_format("stride for perplexity calculation (default: %d)", params.ppl_stride),
  1652. [](common_params & params, int value) {
  1653. params.ppl_stride = value;
  1654. }
  1655. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1656. add_opt(common_arg(
  1657. {"--ppl-output-type"}, "<0|1>",
  1658. string_format("output type for perplexity calculation (default: %d)", params.ppl_output_type),
  1659. [](common_params & params, int value) {
  1660. params.ppl_output_type = value;
  1661. }
  1662. ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
  1663. add_opt(common_arg(
  1664. {"-dt", "--defrag-thold"}, "N",
  1665. string_format("KV cache defragmentation threshold (DEPRECATED)"),
  1666. [](common_params & params, const std::string & value) {
  1667. GGML_UNUSED(params);
  1668. GGML_UNUSED(value);
  1669. LOG_WRN("DEPRECATED: --defrag-thold is deprecated and no longer necessary to specify\n");
  1670. }
  1671. ).set_env("LLAMA_ARG_DEFRAG_THOLD"));
  1672. add_opt(common_arg(
  1673. {"-np", "--parallel"}, "N",
  1674. string_format("number of parallel sequences to decode (default: %d)", params.n_parallel),
  1675. [](common_params & params, int value) {
  1676. params.n_parallel = value;
  1677. }
  1678. ).set_env("LLAMA_ARG_N_PARALLEL"));
  1679. add_opt(common_arg(
  1680. {"-ns", "--sequences"}, "N",
  1681. string_format("number of sequences to decode (default: %d)", params.n_sequences),
  1682. [](common_params & params, int value) {
  1683. params.n_sequences = value;
  1684. }
  1685. ).set_examples({LLAMA_EXAMPLE_PARALLEL}));
  1686. add_opt(common_arg(
  1687. {"-cb", "--cont-batching"},
  1688. string_format("enable continuous batching (a.k.a dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled"),
  1689. [](common_params & params) {
  1690. params.cont_batching = true;
  1691. }
  1692. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CONT_BATCHING"));
  1693. add_opt(common_arg(
  1694. {"-nocb", "--no-cont-batching"},
  1695. "disable continuous batching",
  1696. [](common_params & params) {
  1697. params.cont_batching = false;
  1698. }
  1699. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
  1700. add_opt(common_arg(
  1701. {"--mmproj"}, "FILE",
  1702. "path to a multimodal projector file. see tools/mtmd/README.md\n"
  1703. "note: if -hf is used, this argument can be omitted",
  1704. [](common_params & params, const std::string & value) {
  1705. params.mmproj.path = value;
  1706. }
  1707. ).set_examples(mmproj_examples).set_env("LLAMA_ARG_MMPROJ"));
  1708. add_opt(common_arg(
  1709. {"--mmproj-url"}, "URL",
  1710. "URL to a multimodal projector file. see tools/mtmd/README.md",
  1711. [](common_params & params, const std::string & value) {
  1712. params.mmproj.url = value;
  1713. }
  1714. ).set_examples(mmproj_examples).set_env("LLAMA_ARG_MMPROJ_URL"));
  1715. add_opt(common_arg(
  1716. {"--no-mmproj"},
  1717. "explicitly disable multimodal projector, useful when using -hf",
  1718. [](common_params & params) {
  1719. params.no_mmproj = true;
  1720. }
  1721. ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ"));
  1722. add_opt(common_arg(
  1723. {"--no-mmproj-offload"},
  1724. "do not offload multimodal projector to GPU",
  1725. [](common_params & params) {
  1726. params.mmproj_use_gpu = false;
  1727. }
  1728. ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD"));
  1729. add_opt(common_arg(
  1730. {"--image", "--audio"}, "FILE",
  1731. "path to an image or audio file. use with multimodal models, can be repeated if you have multiple files\n",
  1732. [](common_params & params, const std::string & value) {
  1733. params.image.emplace_back(value);
  1734. }
  1735. ).set_examples({LLAMA_EXAMPLE_MTMD}));
  1736. add_opt(common_arg(
  1737. {"--image-min-tokens"}, "N",
  1738. "minimum number of tokens each image can take, only used by vision models with dynamic resolution (default: read from model)",
  1739. [](common_params & params, int value) {
  1740. params.image_min_tokens = value;
  1741. }
  1742. ).set_examples(mmproj_examples).set_env("LLAMA_ARG_IMAGE_MIN_TOKENS"));
  1743. add_opt(common_arg(
  1744. {"--image-max-tokens"}, "N",
  1745. "maximum number of tokens each image can take, only used by vision models with dynamic resolution (default: read from model)",
  1746. [](common_params & params, int value) {
  1747. params.image_max_tokens = value;
  1748. }
  1749. ).set_examples(mmproj_examples).set_env("LLAMA_ARG_IMAGE_MAX_TOKENS"));
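    // --rpc is only registered when llama_supports_rpc() reports that RPC support is available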
  1750. if (llama_supports_rpc()) {
  1751. add_opt(common_arg(
  1752. {"--rpc"}, "SERVERS",
  1753. "comma separated list of RPC servers",
  1754. [](common_params & params, const std::string & value) {
  1755. add_rpc_devices(value);
  1756. GGML_UNUSED(params);
  1757. }
  1758. ).set_env("LLAMA_ARG_RPC"));
  1759. }
  1760. add_opt(common_arg(
  1761. {"--mlock"},
  1762. "force system to keep model in RAM rather than swapping or compressing",
  1763. [](common_params & params) {
  1764. params.use_mlock = true;
  1765. }
  1766. ).set_env("LLAMA_ARG_MLOCK"));
  1767. add_opt(common_arg(
  1768. {"--no-mmap"},
  1769. "do not memory-map model (slower load but may reduce pageouts if not using mlock)",
  1770. [](common_params & params) {
  1771. params.use_mmap = false;
  1772. }
  1773. ).set_env("LLAMA_ARG_NO_MMAP"));
    add_opt(common_arg(
        {"--numa"}, "TYPE",
        "attempt optimizations that help on some NUMA systems\n"
        "- distribute: spread execution evenly over all nodes\n"
        "- isolate: only spawn threads on CPUs on the node that execution started on\n"
        "- numactl: use the CPU map provided by numactl\n"
        "if the model was previously run without this option, it is recommended to drop the system page cache before using it\n"
        "see https://github.com/ggml-org/llama.cpp/issues/1437",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_NUMA"));
  1789. add_opt(common_arg(
  1790. {"-dev", "--device"}, "<dev1,dev2,..>",
  1791. "comma-separated list of devices to use for offloading (none = don't offload)\n"
  1792. "use --list-devices to see a list of available devices",
  1793. [](common_params & params, const std::string & value) {
  1794. params.devices = parse_device_list(value);
  1795. }
  1796. ).set_env("LLAMA_ARG_DEVICE"));
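    // --list-devices enumerates the non-CPU backend devices, prints their memory totals, and exits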
    add_opt(common_arg(
        {"--list-devices"},
        "print list of available devices and exit",
        [](common_params &) {
            std::vector<ggml_backend_dev_t> devices;
            for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                auto * dev = ggml_backend_dev_get(i);
                if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
                    devices.push_back(dev);
                }
            }
            printf("Available devices:\n");
            for (auto * dev : devices) {
                size_t free, total;
                ggml_backend_dev_memory(dev, &free, &total);
                printf("  %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024);
            }
            exit(0);
        }
    ));
  1817. add_opt(common_arg(
  1818. {"--override-tensor", "-ot"}, "<tensor name pattern>=<buffer type>,...",
  1819. "override tensor buffer type", [](common_params & params, const std::string & value) {
  1820. parse_tensor_buffer_overrides(value, params.tensor_buft_overrides);
  1821. }
  1822. ));
  1823. add_opt(common_arg(
  1824. {"--override-tensor-draft", "-otd"}, "<tensor name pattern>=<buffer type>,...",
  1825. "override tensor buffer type for draft model", [](common_params & params, const std::string & value) {
  1826. parse_tensor_buffer_overrides(value, params.speculative.tensor_buft_overrides);
  1827. }
  1828. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
  1829. add_opt(common_arg(
  1830. {"--cpu-moe", "-cmoe"},
  1831. "keep all Mixture of Experts (MoE) weights in the CPU",
  1832. [](common_params & params) {
  1833. params.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
  1834. }
  1835. ).set_env("LLAMA_ARG_CPU_MOE"));
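    // --n-cpu-moe pins the expert weights of the first N layers to the CPU buffer type
    // by adding one buffer-type override per layer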
    add_opt(common_arg(
        {"--n-cpu-moe", "-ncmoe"}, "N",
        "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU",
        [](common_params & params, int value) {
            if (value < 0) {
                throw std::invalid_argument("invalid value");
            }
            for (int i = 0; i < value; ++i) {
                // keep strings alive and avoid leaking memory by storing them in a static vector
                static std::list<std::string> buft_overrides;
                buft_overrides.push_back(llm_ffn_exps_block_regex(i));
                params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), ggml_backend_cpu_buffer_type()});
            }
        }
    ).set_env("LLAMA_ARG_N_CPU_MOE"));
  1851. add_opt(common_arg(
  1852. {"--cpu-moe-draft", "-cmoed"},
  1853. "keep all Mixture of Experts (MoE) weights in the CPU for the draft model",
  1854. [](common_params & params) {
  1855. params.speculative.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
  1856. }
  1857. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CPU_MOE_DRAFT"));
  1858. add_opt(common_arg(
  1859. {"--n-cpu-moe-draft", "-ncmoed"}, "N",
  1860. "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU for the draft model",
  1861. [](common_params & params, int value) {
  1862. if (value < 0) {
  1863. throw std::invalid_argument("invalid value");
  1864. }
  1865. for (int i = 0; i < value; ++i) {
  1866. static std::list<std::string> buft_overrides_draft;
  1867. buft_overrides_draft.push_back(llm_ffn_exps_block_regex(i));
  1868. params.speculative.tensor_buft_overrides.push_back({buft_overrides_draft.back().c_str(), ggml_backend_cpu_buffer_type()});
  1869. }
  1870. }
  1871. ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_CPU_MOE_DRAFT"));
  1872. add_opt(common_arg(
  1873. {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
  1874. string_format("max. number of layers to store in VRAM (default: %d)", params.n_gpu_layers),
  1875. [](common_params & params, int value) {
  1876. params.n_gpu_layers = value;
  1877. if (!llama_supports_gpu_offload()) {
  1878. fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
  1879. fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
  1880. fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
  1881. }
  1882. }
  1883. ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
  1884. add_opt(common_arg(
  1885. {"-sm", "--split-mode"}, "{none,layer,row}",
  1886. "how to split the model across multiple GPUs, one of:\n"
  1887. "- none: use one GPU only\n"
  1888. "- layer (default): split layers and KV across GPUs\n"
  1889. "- row: split rows across GPUs",
  1890. [](common_params & params, const std::string & value) {
  1891. std::string arg_next = value;
  1892. if (arg_next == "none") {
  1893. params.split_mode = LLAMA_SPLIT_MODE_NONE;
  1894. } else if (arg_next == "layer") {
  1895. params.split_mode = LLAMA_SPLIT_MODE_LAYER;
  1896. } else if (arg_next == "row") {
  1897. params.split_mode = LLAMA_SPLIT_MODE_ROW;
  1898. } else {
  1899. throw std::invalid_argument("invalid value");
  1900. }
  1901. if (!llama_supports_gpu_offload()) {
  1902. fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the split mode has no effect.\n");
  1903. }
  1904. }
  1905. ).set_env("LLAMA_ARG_SPLIT_MODE"));
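    // --tensor-split accepts proportions separated by ',' or '/'; devices beyond the list get 0.0f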
    add_opt(common_arg(
        {"-ts", "--tensor-split"}, "N0,N1,N2,...",
        "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            // split string by , and /
            const std::regex regex{ R"([,/]+)" };
            std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
            std::vector<std::string> split_arg{ it, {} };
            if (split_arg.size() >= llama_max_devices()) {
                throw std::invalid_argument(
                    string_format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)llama_max_devices())
                );
            }
            for (size_t i = 0; i < llama_max_devices(); ++i) {
                if (i < split_arg.size()) {
                    params.tensor_split[i] = std::stof(split_arg[i]);
                } else {
                    params.tensor_split[i] = 0.0f;
                }
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting a tensor split has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_TENSOR_SPLIT"));
  1932. add_opt(common_arg(
  1933. {"-mg", "--main-gpu"}, "INDEX",
  1934. string_format("the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu),
  1935. [](common_params & params, int value) {
  1936. params.main_gpu = value;
  1937. if (!llama_supports_gpu_offload()) {
  1938. fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the main GPU has no effect.\n");
  1939. }
  1940. }
  1941. ).set_env("LLAMA_ARG_MAIN_GPU"));
  1942. add_opt(common_arg(
  1943. {"--check-tensors"},
  1944. string_format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"),
  1945. [](common_params & params) {
  1946. params.check_tensors = true;
  1947. }
  1948. ));
  1949. add_opt(common_arg(
  1950. {"--override-kv"}, "KEY=TYPE:VALUE",
  1951. "advanced option to override model metadata by key. may be specified multiple times.\n"
  1952. "types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false",
  1953. [](common_params & params, const std::string & value) {
  1954. if (!string_parse_kv_override(value.c_str(), params.kv_overrides)) {
  1955. throw std::runtime_error(string_format("error: Invalid type for KV override: %s\n", value.c_str()));
  1956. }
  1957. }
  1958. ));
  1959. add_opt(common_arg(
  1960. {"--no-op-offload"},
  1961. string_format("disable offloading host tensor operations to device (default: %s)", params.no_op_offload ? "true" : "false"),
  1962. [](common_params & params) {
  1963. params.no_op_offload = true;
  1964. }
  1965. ));
  1966. add_opt(common_arg(
  1967. {"--lora"}, "FNAME",
  1968. "path to LoRA adapter (can be repeated to use multiple adapters)",
  1969. [](common_params & params, const std::string & value) {
  1970. params.lora_adapters.push_back({ std::string(value), 1.0, "", "", nullptr });
  1971. }
  1972. // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
  1973. ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
  1974. add_opt(common_arg(
  1975. {"--lora-scaled"}, "FNAME", "SCALE",
  1976. "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
  1977. [](common_params & params, const std::string & fname, const std::string & scale) {
  1978. params.lora_adapters.push_back({ fname, std::stof(scale), "", "", nullptr });
  1979. }
  1980. // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
  1981. ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
  1982. add_opt(common_arg(
  1983. {"--control-vector"}, "FNAME",
  1984. "add a control vector\nnote: this argument can be repeated to add multiple control vectors",
  1985. [](common_params & params, const std::string & value) {
  1986. params.control_vectors.push_back({ 1.0f, value, });
  1987. }
  1988. ));
  1989. add_opt(common_arg(
  1990. {"--control-vector-scaled"}, "FNAME", "SCALE",
  1991. "add a control vector with user defined scaling SCALE\n"
  1992. "note: this argument can be repeated to add multiple scaled control vectors",
  1993. [](common_params & params, const std::string & fname, const std::string & scale) {
  1994. params.control_vectors.push_back({ std::stof(scale), fname });
  1995. }
  1996. ));
  1997. add_opt(common_arg(
  1998. {"--control-vector-layer-range"}, "START", "END",
  1999. "layer range to apply the control vector(s) to, start and end inclusive",
  2000. [](common_params & params, const std::string & start, const std::string & end) {
  2001. params.control_vector_layer_start = std::stoi(start);
  2002. params.control_vector_layer_end = std::stoi(end);
  2003. }
  2004. ));
  2005. add_opt(common_arg(
  2006. {"-a", "--alias"}, "STRING",
  2007. "set alias for model name (to be used by REST API)",
  2008. [](common_params & params, const std::string & value) {
  2009. params.model_alias = value;
  2010. }
  2011. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ALIAS"));
  2012. add_opt(common_arg(
  2013. {"-m", "--model"}, "FNAME",
  2014. ex == LLAMA_EXAMPLE_EXPORT_LORA
  2015. ? "model path from which to load base model"
  2016. : "model path to load",
  2017. [](common_params & params, const std::string & value) {
  2018. params.model.path = value;
  2019. }
  2020. ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL"));
  2021. add_opt(common_arg(
  2022. {"-mu", "--model-url"}, "MODEL_URL",
  2023. "model download url (default: unused)",
  2024. [](common_params & params, const std::string & value) {
  2025. params.model.url = value;
  2026. }
  2027. ).set_env("LLAMA_ARG_MODEL_URL"));
    add_opt(common_arg(
        {"-dr", "--docker-repo"}, "[<repo>/]<model>[:quant]",
        "Docker Hub model repository. repo is optional, defaults to ai/. quant is optional, defaults to :latest.\n"
        "example: gemma3\n"
        "(default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.docker_repo = value;
        }
    ).set_env("LLAMA_ARG_DOCKER_REPO"));
    add_opt(common_arg(
        {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]",
        "Hugging Face model repository; quant is optional, case-insensitive, defaults to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.\n"
        "mmproj is also downloaded automatically if available. to disable, add --no-mmproj\n"
        "example: unsloth/phi-4-GGUF:q4_k_m\n"
        "(default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO"));
  2047. add_opt(common_arg(
  2048. {"-hfd", "-hfrd", "--hf-repo-draft"}, "<user>/<model>[:quant]",
  2049. "Same as --hf-repo, but for the draft model (default: unused)",
  2050. [](common_params & params, const std::string & value) {
  2051. params.speculative.model.hf_repo = value;
  2052. }
  2053. ).set_env("LLAMA_ARG_HFD_REPO"));
  2054. add_opt(common_arg(
  2055. {"-hff", "--hf-file"}, "FILE",
  2056. "Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)",
  2057. [](common_params & params, const std::string & value) {
  2058. params.model.hf_file = value;
  2059. }
  2060. ).set_env("LLAMA_ARG_HF_FILE"));
  2061. add_opt(common_arg(
  2062. {"-hfv", "-hfrv", "--hf-repo-v"}, "<user>/<model>[:quant]",
  2063. "Hugging Face model repository for the vocoder model (default: unused)",
  2064. [](common_params & params, const std::string & value) {
  2065. params.vocoder.model.hf_repo = value;
  2066. }
  2067. ).set_env("LLAMA_ARG_HF_REPO_V"));
  2068. add_opt(common_arg(
  2069. {"-hffv", "--hf-file-v"}, "FILE",
  2070. "Hugging Face model file for the vocoder model (default: unused)",
  2071. [](common_params & params, const std::string & value) {
  2072. params.vocoder.model.hf_file = value;
  2073. }
  2074. ).set_env("LLAMA_ARG_HF_FILE_V"));
  2075. add_opt(common_arg(
  2076. {"-hft", "--hf-token"}, "TOKEN",
  2077. "Hugging Face access token (default: value from HF_TOKEN environment variable)",
  2078. [](common_params & params, const std::string & value) {
  2079. params.hf_token = value;
  2080. }
  2081. ).set_env("HF_TOKEN"));
  2082. add_opt(common_arg(
  2083. {"--context-file"}, "FNAME",
  2084. "file to load context from (repeat to specify multiple files)",
  2085. [](common_params & params, const std::string & value) {
  2086. std::ifstream file(value, std::ios::binary);
  2087. if (!file) {
  2088. throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
  2089. }
  2090. params.context_files.push_back(value);
  2091. }
  2092. ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
  2093. add_opt(common_arg(
  2094. {"--chunk-size"}, "N",
  2095. string_format("minimum length of embedded text chunks (default: %d)", params.chunk_size),
  2096. [](common_params & params, int value) {
  2097. params.chunk_size = value;
  2098. }
  2099. ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
  2100. add_opt(common_arg(
  2101. {"--chunk-separator"}, "STRING",
  2102. string_format("separator between chunks (default: '%s')", params.chunk_separator.c_str()),
  2103. [](common_params & params, const std::string & value) {
  2104. params.chunk_separator = value;
  2105. }
  2106. ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
  2107. add_opt(common_arg(
  2108. {"--junk"}, "N",
  2109. string_format("number of times to repeat the junk text (default: %d)", params.n_junk),
  2110. [](common_params & params, int value) {
  2111. params.n_junk = value;
  2112. }
  2113. ).set_examples({LLAMA_EXAMPLE_PASSKEY, LLAMA_EXAMPLE_PARALLEL}));
  2114. add_opt(common_arg(
  2115. {"--pos"}, "N",
  2116. string_format("position of the passkey in the junk text (default: %d)", params.i_pos),
  2117. [](common_params & params, int value) {
  2118. params.i_pos = value;
  2119. }
  2120. ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
  2121. add_opt(common_arg(
  2122. {"-o", "--output", "--output-file"}, "FNAME",
  2123. string_format("output file (default: '%s')", params.out_file.c_str()),
  2124. [](common_params & params, const std::string & value) {
  2125. params.out_file = value;
  2126. }
  2127. ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_FINETUNE}));
  2128. add_opt(common_arg(
  2129. {"-ofreq", "--output-frequency"}, "N",
  2130. string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
  2131. [](common_params & params, int value) {
  2132. params.n_out_freq = value;
  2133. }
  2134. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  2135. add_opt(common_arg(
  2136. {"--output-format"}, "{gguf,dat}",
  2137. string_format("output format for imatrix file (default: %s)", params.imat_dat > 0 ? "dat" : "gguf"),
  2138. [](common_params & params, const std::string & value) {
  2139. /**/ if (value == "gguf") { params.imat_dat = -1; }
  2140. else if (value == "dat") { params.imat_dat = 1; }
  2141. else { throw std::invalid_argument("invalid output format"); }
  2142. }
  2143. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  2144. add_opt(common_arg(
  2145. {"--save-frequency"}, "N",
  2146. string_format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq),
  2147. [](common_params & params, int value) {
  2148. params.n_save_freq = value;
  2149. }
  2150. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  2151. add_opt(common_arg(
  2152. {"--process-output"},
  2153. string_format("collect data for the output tensor (default: %s)", params.process_output ? "true" : "false"),
  2154. [](common_params & params) {
  2155. params.process_output = true;
  2156. }
  2157. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  2158. add_opt(common_arg(
  2159. {"--no-ppl"},
  2160. string_format("do not compute perplexity (default: %s)", params.compute_ppl ? "true" : "false"),
  2161. [](common_params & params) {
  2162. params.compute_ppl = false;
  2163. }
  2164. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  2165. add_opt(common_arg(
  2166. {"--chunk", "--from-chunk"}, "N",
  2167. string_format("start processing the input from chunk N (default: %d)", params.i_chunk),
  2168. [](common_params & params, int value) {
  2169. params.i_chunk = value;
  2170. }
  2171. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  2172. add_opt(common_arg(
  2173. {"--show-statistics"},
  2174. string_format("show imatrix statistics and then exit (default: %s)", params.show_statistics ? "true" : "false"),
  2175. [](common_params & params) {
  2176. params.show_statistics = true;
  2177. }
  2178. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
  2179. add_opt(common_arg(
  2180. {"--parse-special"},
  2181. string_format("parse special tokens (chat, tool, etc) (default: %s)", params.parse_special ? "true" : "false"),
  2182. [](common_params & params) {
  2183. params.parse_special = true;
  2184. }
  2185. ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"-pps"},
        string_format("whether the prompt is shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false"),
        [](common_params & params) {
            params.is_pp_shared = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH, LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-tgs"},
        string_format("whether text generation is separated across the different sequences (default: %s)", params.is_tg_separate ? "true" : "false"),
        [](common_params & params) {
            params.is_tg_separate = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH, LLAMA_EXAMPLE_PARALLEL}));
  2200. add_opt(common_arg(
  2201. {"-npp"}, "n0,n1,...",
  2202. "number of prompt tokens",
  2203. [](common_params & params, const std::string & value) {
  2204. auto p = string_split<int>(value, ',');
  2205. params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
  2206. }
  2207. ).set_examples({LLAMA_EXAMPLE_BENCH}));
  2208. add_opt(common_arg(
  2209. {"-ntg"}, "n0,n1,...",
  2210. "number of text generation tokens",
  2211. [](common_params & params, const std::string & value) {
  2212. auto p = string_split<int>(value, ',');
  2213. params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
  2214. }
  2215. ).set_examples({LLAMA_EXAMPLE_BENCH}));
  2216. add_opt(common_arg(
  2217. {"-npl"}, "n0,n1,...",
  2218. "number of parallel prompts",
  2219. [](common_params & params, const std::string & value) {
  2220. auto p = string_split<int>(value, ',');
  2221. params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
  2222. }
  2223. ).set_examples({LLAMA_EXAMPLE_BENCH}));
  2224. add_opt(common_arg(
  2225. {"--embd-normalize"}, "N",
  2226. string_format("normalisation for embeddings (default: %d) (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)", params.embd_normalize),
  2227. [](common_params & params, int value) {
  2228. params.embd_normalize = value;
  2229. }
  2230. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
  2231. add_opt(common_arg(
  2232. {"--embd-output-format"}, "FORMAT",
  2233. "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix, \"raw\" = plain whitespace-delimited output (one embedding per line)",
  2234. [](common_params & params, const std::string & value) {
  2235. params.embd_out = value;
  2236. }
  2237. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
  2238. add_opt(common_arg(
  2239. {"--embd-separator"}, "STRING",
  2240. "separator of embeddings (default \\n) for example \"<#sep#>\"",
  2241. [](common_params & params, const std::string & value) {
  2242. params.embd_sep = value;
  2243. }
  2244. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
  2245. add_opt(common_arg(
  2246. {"--cls-separator"}, "STRING",
  2247. "separator of classification sequences (default \\t) for example \"<#seq#>\"",
  2248. [](common_params & params, const std::string & value) {
  2249. params.cls_sep = value;
  2250. }
  2251. ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--host"}, "HOST",
        string_format("IP address to listen on, or bind to a UNIX socket if the address ends with .sock (default: %s)", params.hostname.c_str()),
        [](common_params & params, const std::string & value) {
            params.hostname = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_HOST"));
    add_opt(common_arg(
        {"--port"}, "PORT",
        string_format("port to listen on (default: %d)", params.port),
        [](common_params & params, int value) {
            params.port = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_PORT"));
  2266. add_opt(common_arg(
  2267. {"--path"}, "PATH",
  2268. string_format("path to serve static files from (default: %s)", params.public_path.c_str()),
  2269. [](common_params & params, const std::string & value) {
  2270. params.public_path = value;
  2271. }
  2272. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH"));
  2273. add_opt(common_arg(
  2274. {"--api-prefix"}, "PREFIX",
  2275. string_format("prefix path the server serves from, without the trailing slash (default: %s)", params.api_prefix.c_str()),
  2276. [](common_params & params, const std::string & value) {
  2277. params.api_prefix = value;
  2278. }
  2279. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_API_PREFIX"));
  2280. add_opt(common_arg(
  2281. {"--no-webui"},
  2282. string_format("Disable the Web UI (default: %s)", params.webui ? "enabled" : "disabled"),
  2283. [](common_params & params) {
  2284. params.webui = false;
  2285. }
  2286. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_WEBUI"));
  2287. add_opt(common_arg(
  2288. {"--embedding", "--embeddings"},
  2289. string_format("restrict to only support embedding use case; use only with dedicated embedding models (default: %s)", params.embedding ? "enabled" : "disabled"),
  2290. [](common_params & params) {
  2291. params.embedding = true;
  2292. }
  2293. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS"));
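    // --reranking implies embedding mode and forces RANK pooling so the server can serve rerank requests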
  2294. add_opt(common_arg(
  2295. {"--reranking", "--rerank"},
  2296. string_format("enable reranking endpoint on server (default: %s)", "disabled"),
  2297. [](common_params & params) {
  2298. params.embedding = true;
  2299. params.pooling_type = LLAMA_POOLING_TYPE_RANK;
  2300. }
  2301. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING"));
  2302. add_opt(common_arg(
  2303. {"--api-key"}, "KEY",
  2304. "API key to use for authentication (default: none)",
  2305. [](common_params & params, const std::string & value) {
  2306. params.api_keys.push_back(value);
  2307. }
  2308. ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_API_KEY"));
  2309. add_opt(common_arg(
  2310. {"--api-key-file"}, "FNAME",
  2311. "path to file containing API keys (default: none)",
  2312. [](common_params & params, const std::string & value) {
  2313. std::ifstream key_file(value);
  2314. if (!key_file) {
  2315. throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
  2316. }
  2317. std::string key;
  2318. while (std::getline(key_file, key)) {
  2319. if (!key.empty()) {
  2320. params.api_keys.push_back(key);
  2321. }
  2322. }
  2323. key_file.close();
  2324. }
  2325. ).set_examples({LLAMA_EXAMPLE_SERVER}));
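    // illustrative usage (hypothetical file name): llama-server --api-key-file keys.txt
    // the key file is read line by line; empty lines are skipped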
    add_opt(common_arg(
        {"--ssl-key-file"}, "FNAME",
        "path to file containing a PEM-encoded SSL private key",
        [](common_params & params, const std::string & value) {
            params.ssl_file_key = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_KEY_FILE"));
    add_opt(common_arg(
        {"--ssl-cert-file"}, "FNAME",
        "path to file containing a PEM-encoded SSL certificate",
        [](common_params & params, const std::string & value) {
            params.ssl_file_cert = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_CERT_FILE"));
    add_opt(common_arg(
        {"--chat-template-kwargs"}, "STRING",
        string_format("sets additional parameters for the jinja chat template, as a JSON object of key/value pairs"),
        [](common_params & params, const std::string & value) {
            auto parsed = json::parse(value);
            for (const auto & item : parsed.items()) {
                params.default_template_kwargs[item.key()] = item.value().dump();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_CHAT_TEMPLATE_KWARGS"));
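    // illustrative usage (hypothetical key name): --chat-template-kwargs '{"my_flag": true}'
    // each top-level key/value of the JSON object is forwarded to the jinja template as a kwarg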
    add_opt(common_arg(
        {"-to", "--timeout"}, "N",
        string_format("server read/write timeout in seconds (default: %d)", params.timeout_read),
        [](common_params & params, int value) {
            params.timeout_read = value;
            params.timeout_write = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_TIMEOUT"));
    add_opt(common_arg(
        {"--threads-http"}, "N",
        string_format("number of threads used to process HTTP requests (default: %d)", params.n_threads_http),
        [](common_params & params, int value) {
            params.n_threads_http = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
    add_opt(common_arg(
        {"--cache-reuse"}, "N",
        string_format(
            "min chunk size to attempt reusing from the cache via KV shifting (default: %d)\n"
            "[(card)](https://ggml.ai/f0.png)", params.n_cache_reuse
        ),
        [](common_params & params, int value) {
            params.n_cache_reuse = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE"));
    add_opt(common_arg(
        {"--metrics"},
        string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_metrics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
    add_opt(common_arg(
        {"--props"},
        string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_props = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS"));
    add_opt(common_arg(
        {"--slots"},
        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_slots = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--no-slots"},
        "disables slots monitoring endpoint",
        [](common_params & params) {
            params.endpoint_slots = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--slot-save-path"}, "PATH",
        "path to save slot kv cache (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.slot_save_path = value;
            // if doesn't end with DIRECTORY_SEPARATOR, add it
            if (!params.slot_save_path.empty() && params.slot_save_path[params.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
                params.slot_save_path += DIRECTORY_SEPARATOR;
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--models-dir"}, "PATH",
        "directory containing models for the router server (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.models_dir = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODELS_DIR"));
    add_opt(common_arg(
        {"--models-max"}, "N",
        string_format("for router server, maximum number of models to load simultaneously (default: %d, 0 = unlimited)", params.models_max),
        [](common_params & params, int value) {
            params.models_max = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODELS_MAX"));
    add_opt(common_arg(
        {"--no-models-autoload"},
        "disables automatic loading of models (default: enabled)",
        [](common_params & params) {
            params.models_autoload = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_MODELS_AUTOLOAD"));
    add_opt(common_arg(
        {"--jinja"},
        string_format("use jinja template for chat (default: %s)\n", params.use_jinja ? "enabled" : "disabled"),
        [](common_params & params) {
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_JINJA"));
    add_opt(common_arg(
        {"--no-jinja"},
        string_format("disable jinja template for chat (default: %s)\n", params.use_jinja ? "enabled" : "disabled"),
        [](common_params & params) {
            params.use_jinja = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_NO_JINJA"));
    add_opt(common_arg(
        {"--reasoning-format"}, "FORMAT",
        "controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:\n"
        "- none: leaves thoughts unparsed in `message.content`\n"
        "- deepseek: puts thoughts in `message.reasoning_content`\n"
        "- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`\n"
        "(default: auto)",
        [](common_params & params, const std::string & value) {
            params.reasoning_format = common_reasoning_format_from_name(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK"));
    add_opt(common_arg(
        {"--reasoning-budget"}, "N",
        "controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)",
        [](common_params & params, int value) {
            if (value != 0 && value != -1) { throw std::invalid_argument("invalid value"); }
            params.reasoning_budget = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK_BUDGET"));
    add_opt(common_arg(
        {"--chat-template"}, "JINJA_TEMPLATE",
        string_format(
            "set custom jinja chat template (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            params.chat_template = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
    add_opt(common_arg(
        {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
        string_format(
            "set custom jinja chat template file (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            params.chat_template = read_file(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE_FILE"));
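    // illustrative usage (hypothetical file name): llama-server --jinja --chat-template-file my_template.jinja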
    add_opt(common_arg(
        {"--no-prefill-assistant"},
        string_format(
            "whether to prefill the assistant's response if the last message is an assistant message (default: prefill enabled)\n"
            "when this flag is set, if the last message is an assistant message then it will be treated as a full message and not prefilled\n"
        ),
        [](common_params & params) {
            params.prefill_assistant = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_PREFILL_ASSISTANT"));
    add_opt(common_arg(
        {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
        string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),
        [](common_params & params, const std::string & value) {
            params.slot_prompt_similarity = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--lora-init-without-apply"},
        string_format("load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? "enabled" : "disabled"),
        [](common_params & params) {
            params.lora_init_without_apply = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--simple-io"},
        "use basic IO for better compatibility in subprocesses and limited consoles",
        [](common_params & params) {
            params.simple_io = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
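    // control vector generator / batched-bench parameters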
    add_opt(common_arg(
        {"--positive-file"}, "FNAME",
        string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_positive_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--negative-file"}, "FNAME",
        string_format("negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_negative_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-batch"}, "N",
        string_format("batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch),
        [](common_params & params, int value) {
            params.n_pca_batch = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-iter"}, "N",
        string_format("number of iterations used for PCA (default: %d)", params.n_pca_iterations),
        [](common_params & params, int value) {
            params.n_pca_iterations = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--method"}, "{pca, mean}",
        "dimensionality reduction method to be used (default: pca)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "pca") { params.cvector_dimre_method = DIMRE_METHOD_PCA; }
            else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--output-format"}, "{md,jsonl}",
        "output format for batched-bench results (default: md)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "jsonl") { params.batched_bench_output_jsonl = true; }
            else if (value == "md") { params.batched_bench_output_jsonl = false; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
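    // logging parameters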
    add_opt(common_arg(
        {"--log-disable"},
        "Disable logging",
        [](common_params &) {
            common_log_pause(common_log_main());
        }
    ));
    add_opt(common_arg(
        {"--log-file"}, "FNAME",
        "Log to file",
        [](common_params &, const std::string & value) {
            common_log_set_file(common_log_main(), value.c_str());
        }
    ).set_env("LLAMA_LOG_FILE"));
    add_opt(common_arg(
        {"--log-colors"}, "[on|off|auto]",
        "Set colored logging ('on', 'off', or 'auto', default: 'auto')\n"
        "'auto' enables colors when output is to a terminal",
        [](common_params &, const std::string & value) {
            if (is_truthy(value)) {
                common_log_set_colors(common_log_main(), LOG_COLORS_ENABLED);
            } else if (is_falsey(value)) {
                common_log_set_colors(common_log_main(), LOG_COLORS_DISABLED);
            } else if (is_autoy(value)) {
                common_log_set_colors(common_log_main(), LOG_COLORS_AUTO);
            } else {
                throw std::invalid_argument(
                    string_format("error: unknown value for --log-colors: '%s'\n", value.c_str()));
            }
        }
    ).set_env("LLAMA_LOG_COLORS"));
    add_opt(common_arg(
        {"-v", "--verbose", "--log-verbose"},
        "Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
        [](common_params & params) {
            params.verbosity = INT_MAX;
            common_log_set_verbosity_thold(INT_MAX);
        }
    ));
    add_opt(common_arg(
        {"--offline"},
        "Offline mode: forces use of cache, prevents network access",
        [](common_params & params) {
            params.offline = true;
        }
    ).set_env("LLAMA_OFFLINE"));
    add_opt(common_arg(
        {"-lv", "--verbosity", "--log-verbosity"}, "N",
        string_format("Set the verbosity threshold. Messages with a higher verbosity will be ignored. Values:\n"
            " - 0: generic output\n"
            " - 1: error\n"
            " - 2: warning\n"
            " - 3: info\n"
            " - 4: debug\n"
            "(default: %d)\n", params.verbosity),
        [](common_params & params, int value) {
            params.verbosity = value;
            common_log_set_verbosity_thold(value);
        }
    ).set_env("LLAMA_LOG_VERBOSITY"));
    add_opt(common_arg(
        {"--log-prefix"},
        "Enable prefix in log messages",
        [](common_params &) {
            common_log_set_prefix(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_PREFIX"));
    add_opt(common_arg(
        {"--log-timestamps"},
        "Enable timestamps in log messages",
        [](common_params &) {
            common_log_set_timestamps(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_TIMESTAMPS"));
    // speculative parameters
    add_opt(common_arg(
        {"-td", "--threads-draft"}, "N",
        "number of threads to use during generation for the draft model (default: same as --threads)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.n_threads = value;
            if (params.speculative.cpuparams.n_threads <= 0) {
                params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-tbd", "--threads-batch-draft"}, "N",
        "number of threads to use during batch and prompt processing for the draft model (default: same as --threads-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.n_threads = value;
            if (params.speculative.cpuparams_batch.n_threads <= 0) {
                params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-Cd", "--cpu-mask-draft"}, "M",
        "Draft model CPU affinity mask. Complements --cpu-range-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crd", "--cpu-range-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: same as --poll)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Cbd", "--cpu-mask-batch-draft"}, "M",
        "Draft model CPU affinity mask for batch processing. Complements --cpu-range-batch-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-batch-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-batch-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: --cpu-strict-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-batch-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-batch-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: --poll-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--draft-max", "--draft", "--draft-n"}, "N",
        string_format("number of tokens to draft for speculative decoding (default: %d)", params.speculative.n_max),
        [](common_params & params, int value) {
            params.speculative.n_max = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MAX"));
    add_opt(common_arg(
        {"--draft-min", "--draft-n-min"}, "N",
        string_format("minimum number of draft tokens to use for speculative decoding (default: %d)", params.speculative.n_min),
        [](common_params & params, int value) {
            params.speculative.n_min = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MIN"));
    add_opt(common_arg(
        {"--draft-p-split"}, "P",
        string_format("speculative decoding split probability (default: %.1f)", (double)params.speculative.p_split),
        [](common_params & params, const std::string & value) {
            params.speculative.p_split = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}).set_env("LLAMA_ARG_DRAFT_P_SPLIT"));
    add_opt(common_arg(
        {"--draft-p-min"}, "P",
        string_format("minimum speculative decoding probability (greedy) (default: %.1f)", (double)params.speculative.p_min),
        [](common_params & params, const std::string & value) {
            params.speculative.p_min = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_P_MIN"));
    add_opt(common_arg(
        {"-cd", "--ctx-size-draft"}, "N",
        string_format("size of the prompt context for the draft model (default: %d, 0 = loaded from model)", params.speculative.n_ctx),
        [](common_params & params, int value) {
            params.speculative.n_ctx = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CTX_SIZE_DRAFT"));
    add_opt(common_arg(
        {"-devd", "--device-draft"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading the draft model (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.speculative.devices = parse_device_list(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N",
        "number of layers to store in VRAM for the draft model",
        [](common_params & params, int value) {
            params.speculative.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_GPU_LAYERS_DRAFT"));
    add_opt(common_arg(
        {"-md", "--model-draft"}, "FNAME",
        "draft model for speculative decoding (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
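    // illustrative speculative-decoding invocation (hypothetical file names):
    //   llama-server -m target.gguf -md draft.gguf --draft-max 16 --draft-min 4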
    add_opt(common_arg(
        {"--spec-replace"}, "TARGET", "DRAFT",
        "translate the string in TARGET into DRAFT if the draft model and main model are not compatible",
        [](common_params & params, const std::string & tgt, const std::string & dft) {
            params.speculative.replacements.push_back({ tgt, dft });
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-ctkd", "--cache-type-k-draft"}, "TYPE",
        string_format(
            "KV cache data type for K for the draft model\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.speculative.cache_type_k)
        ),
        [](common_params & params, const std::string & value) {
            params.speculative.cache_type_k = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_K_DRAFT"));
    add_opt(common_arg(
        {"-ctvd", "--cache-type-v-draft"}, "TYPE",
        string_format(
            "KV cache data type for V for the draft model\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.speculative.cache_type_v)
        ),
        [](common_params & params, const std::string & value) {
            params.speculative.cache_type_v = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_V_DRAFT"));
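    // audio generation (TTS) parameters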
    add_opt(common_arg(
        {"-mv", "--model-vocoder"}, "FNAME",
        "vocoder model for audio generation (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-use-guide-tokens"},
        "Use guide tokens to improve TTS word recall",
        [](common_params & params) {
            params.vocoder.use_guide_tokens = true;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-speaker-file"}, "FNAME",
        "speaker file path for audio generation",
        [](common_params & params, const std::string & value) {
            params.vocoder.speaker_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));
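    // diffusion parameters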
    add_opt(common_arg(
        {"--diffusion-steps"}, "N",
        string_format("number of diffusion steps (default: %d)", params.diffusion.steps),
        [](common_params & params, int value) { params.diffusion.steps = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-visual"},
        string_format("enable visual diffusion mode (show progressive generation) (default: %s)", params.diffusion.visual_mode ? "true" : "false"),
        [](common_params & params) { params.diffusion.visual_mode = true; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-eps"}, "F",
        string_format("epsilon for timesteps (default: %.6f)", (double) params.diffusion.eps),
        [](common_params & params, const std::string & value) { params.diffusion.eps = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-algorithm"}, "N",
        string_format("diffusion algorithm: 0=ORIGIN, 1=ENTROPY_BASED, 2=MARGIN_BASED, 3=RANDOM, 4=LOW_CONFIDENCE (default: %d)", params.diffusion.algorithm),
        [](common_params & params, int value) { params.diffusion.algorithm = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-alg-temp"}, "F",
        string_format("dream algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp),
        [](common_params & params, const std::string & value) { params.diffusion.alg_temp = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-block-length"}, "N",
        string_format("llada block length for generation (default: %d)", params.diffusion.block_length),
        [](common_params & params, int value) { params.diffusion.block_length = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-cfg-scale"}, "F",
        string_format("llada classifier-free guidance scale (default: %.3f)", (double) params.diffusion.cfg_scale),
        [](common_params & params, const std::string & value) { params.diffusion.cfg_scale = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        {"--diffusion-add-gumbel-noise"}, "F",
        string_format("add gumbel noise to the logits if temp > 0.0 (default: %s)", params.diffusion.add_gumbel_noise ? "true" : "false"),
        [](common_params & params, const std::string & value) { params.diffusion.add_gumbel_noise = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
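    // training / finetuning parameters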
    add_opt(common_arg(
        { "-lr", "--learning-rate" }, "ALPHA",
        string_format("adamw or sgd optimizer alpha (default: %.2g); note: for SGD (no momentum) an alpha ~10x larger is recommended", (double) params.lr.lr0),
        [](common_params & params, const std::string & value) { params.lr.lr0 = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        { "-lr-min", "--learning-rate-min" }, "ALPHA",
        string_format("(if >0) final learning rate after decay (if -decay-epochs is set, default=%.2g)",
            (double) params.lr.lr_min),
        [](common_params & params, const std::string & value) { params.lr.lr_min = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-decay-epochs", "--learning-rate-decay-epochs"}, "ALPHA",
        string_format("(if >0) decay learning rate to -lr-min after this many epochs (exponential decay, default=%.2g)", (double) params.lr.decay_epochs),
        [](common_params & params, const std::string & value) { params.lr.decay_epochs = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-wd", "--weight-decay"}, "WD",
        string_format("adamw or sgd optimizer weight decay (0 is off; recommend very small e.g. 1e-9) (default: %.2g).", (double) params.lr.wd),
        [](common_params & params, const std::string & value) { params.lr.wd = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-val-split", "--val-split"}, "FRACTION",
        string_format("fraction of data to use as validation set for training (default: %.2g).", (double) params.val_split),
        [](common_params & params, const std::string & value) { params.val_split = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-epochs", "--epochs"}, "N",
        string_format("optimizer max # of epochs (default: %d)", params.lr.epochs),
        [](common_params & params, int epochs) { params.lr.epochs = epochs; }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    add_opt(common_arg(
        {"-opt", "--optimizer"}, "sgd|adamw", "adamw or sgd",
        [](common_params & params, const std::string & name) {
            params.optimizer = common_opt_get_optimizer(name.c_str());
            if (params.optimizer == GGML_OPT_OPTIMIZER_TYPE_COUNT) {
                throw std::invalid_argument("invalid --optimizer, valid options: adamw, sgd");
            }
        }
    ).set_examples({ LLAMA_EXAMPLE_FINETUNE }));
    // presets
    add_opt(common_arg(
        {"--tts-oute-default"},
        string_format("use default OuteTTS models (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "OuteAI/OuteTTS-0.2-500M-GGUF";
            params.model.hf_file = "OuteTTS-0.2-500M-Q8_0.gguf";
            params.vocoder.model.hf_repo = "ggml-org/WavTokenizer";
            params.vocoder.model.hf_file = "WavTokenizer-Large-75-F16.gguf";
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));
    add_opt(common_arg(
        {"--embd-gemma-default"},
        string_format("use default EmbeddingGemma model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/embeddinggemma-300M-qat-q4_0-GGUF";
            params.model.hf_file = "embeddinggemma-300M-qat-Q4_0.gguf";
            params.port = 8011;
            params.n_ubatch = 2048;
            params.n_batch = 2048;
            params.n_parallel = 32;
            params.n_ctx = 2048*params.n_parallel;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-1.5b-default"},
        string_format("use default Qwen 2.5 Coder 1.5B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-3b-default"},
        string_format("use default Qwen 2.5 Coder 3B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-default"},
        string_format("use default Qwen 2.5 Coder 7B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-spec"},
        string_format("use Qwen 2.5 Coder 7B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-14b-spec"},
        string_format("use Qwen 2.5 Coder 14B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-14B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-30b-default"},
        string_format("use default Qwen 3 Coder 30B A3B Instruct (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
            params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
            params.port = 8012;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--gpt-oss-20b-default"},
        string_format("use gpt-oss-20b (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gpt-oss-20b-GGUF";
            params.model.hf_file = "gpt-oss-20b-mxfp4.gguf";
            params.port = 8013;
            params.n_ubatch = 2048;
            params.n_batch = 32768;
            params.n_parallel = 2;
            params.n_ctx = 131072*params.n_parallel;
            params.sampling.temp = 1.0f;
            params.sampling.top_p = 1.0f;
            params.sampling.top_k = 0;
            params.sampling.min_p = 0.01f;
            params.use_jinja = true;
            //params.default_template_kwargs["reasoning_effort"] = "\"high\"";
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--gpt-oss-120b-default"},
        string_format("use gpt-oss-120b (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gpt-oss-120b-GGUF";
            params.port = 8013;
            params.n_ubatch = 2048;
            params.n_batch = 32768;
            params.n_parallel = 2;
            params.n_ctx = 131072*params.n_parallel;
            params.sampling.temp = 1.0f;
            params.sampling.top_p = 1.0f;
            params.sampling.top_k = 0;
            params.sampling.min_p = 0.01f;
            params.use_jinja = true;
            //params.default_template_kwargs["reasoning_effort"] = "\"high\"";
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--vision-gemma-4b-default"},
        string_format("use Gemma 3 4B QAT (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gemma-3-4b-it-qat-GGUF";
            params.port = 8014;
            params.n_ctx = 0;
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--vision-gemma-12b-default"},
        string_format("use Gemma 3 12B QAT (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gemma-3-12b-it-qat-GGUF";
            params.port = 8014;
            params.n_ctx = 0;
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));

    return ctx_arg;
}