#include "arg.h"

#include "chat.h"
#include "common.h"
#include "gguf.h" // for reading GGUF splits
#include "json-schema-to-grammar.h"
#include "log.h"
#include "sampling.h"

// fix problem with std::min and std::max
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#   define NOMINMAX
#endif
#include <windows.h>
#endif

#define JSON_ASSERT GGML_ASSERT
#include <nlohmann/json.hpp>

#include <algorithm>
#include <climits>
#include <cmath>   // for std::pow (retry backoff)
#include <cstdarg>
#include <filesystem>
#include <fstream>
#include <list>
#include <map>     // for std::map (tensor buffer overrides)
#include <regex>
#include <set>
#include <sstream> // for std::istringstream / std::ostringstream
#include <string>
#include <thread>
#include <vector>

//#define LLAMA_USE_CURL

#if defined(LLAMA_USE_CURL)
#include <curl/curl.h>
#include <curl/easy.h>
#include <future>
#endif

using json = nlohmann::ordered_json;
std::initializer_list<enum llama_example> mmproj_examples = {
    LLAMA_EXAMPLE_MTMD,
    LLAMA_EXAMPLE_SERVER,
};

static std::string read_file(const std::string & fname) {
    std::ifstream file(fname);
    if (!file) {
        throw std::runtime_error(string_format("error: failed to open file '%s'\n", fname.c_str()));
    }
    std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
    file.close();
    return content;
}

static void write_file(const std::string & fname, const std::string & content) {
    std::ofstream file(fname);
    if (!file) {
        throw std::runtime_error(string_format("error: failed to open file '%s'\n", fname.c_str()));
    }
    file << content;
    file.close();
}

common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) {
    this->examples = std::move(examples);
    return *this;
}

common_arg & common_arg::set_excludes(std::initializer_list<enum llama_example> excludes) {
    this->excludes = std::move(excludes);
    return *this;
}

common_arg & common_arg::set_env(const char * env) {
    help = help + "\n(env: " + env + ")";
    this->env = env;
    return *this;
}

common_arg & common_arg::set_sparam() {
    is_sparam = true;
    return *this;
}

bool common_arg::in_example(enum llama_example ex) {
    return examples.find(ex) != examples.end();
}

bool common_arg::is_exclude(enum llama_example ex) {
    return excludes.find(ex) != excludes.end();
}

bool common_arg::get_value_from_env(std::string & output) {
    if (env == nullptr) return false;
    char * value = std::getenv(env);
    if (value) {
        output = value;
        return true;
    }
    return false;
}

bool common_arg::has_value_from_env() {
    return env != nullptr && std::getenv(env);
}
static std::vector<std::string> break_str_into_lines(std::string input, size_t max_char_per_line) {
    std::vector<std::string> result;
    std::istringstream iss(input);
    std::string line;
    auto add_line = [&](const std::string & l) {
        if (l.length() <= max_char_per_line) {
            result.push_back(l);
        } else {
            // line is too long: greedily wrap it word by word
            std::istringstream line_stream(l);
            std::string word, current_line;
            while (line_stream >> word) {
                // the "!current_line.empty()" term accounts for the joining space
                if (current_line.length() + !current_line.empty() + word.length() > max_char_per_line) {
                    if (!current_line.empty()) result.push_back(current_line);
                    current_line = word;
                } else {
                    current_line += (!current_line.empty() ? " " : "") + word;
                }
            }
            if (!current_line.empty()) result.push_back(current_line);
        }
    };
    while (std::getline(iss, line)) {
        add_line(line);
    }
    return result;
}
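
// Worked example (illustrative, not from the original source): with
// max_char_per_line = 10, the greedy wrap above turns
//   "lorem ipsum dolor sit amet"
// into the lines {"lorem", "ipsum", "dolor sit", "amet"} -- each word is
// appended while the line still fits, and a new line starts otherwise.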
std::string common_arg::to_string() {
    // params for printing to console
    const static int n_leading_spaces = 40;
    const static int n_char_per_line_help = 70; // TODO: detect this based on current console
    std::string leading_spaces(n_leading_spaces, ' ');

    std::ostringstream ss;
    for (const auto arg : args) {
        if (arg == args.front()) {
            if (args.size() == 1) {
                ss << arg;
            } else {
                // first arg is usually the abbreviation, pad it so the long forms line up
                auto tmp = std::string(arg) + ", ";
                auto spaces = std::string(std::max(0, 7 - (int)tmp.size()), ' ');
                ss << tmp << spaces;
            }
        } else {
            ss << arg << (arg != args.back() ? ", " : "");
        }
    }
    if (value_hint)   ss << " " << value_hint;
    if (value_hint_2) ss << " " << value_hint_2;
    if (ss.tellp() > n_leading_spaces - 3) {
        // current line is too long, add new line
        ss << "\n" << leading_spaces;
    } else {
        // padding between arg and help, same line
        ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
    }
    const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
    for (const auto & line : help_lines) {
        ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
    }
    return ss.str();
}
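
// Illustrative rendering (flag and help text are made up, not taken from the
// real option table): an arg with args = {"-m", "--model"} and value_hint
// "FNAME" prints roughly as
//   -m,    --model FNAME                   path to the model file
// i.e. a 7-column slot for the short form, then the long form and hint,
// padded to column 40 before the wrapped help text begins.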
//
// downloader
//

struct common_hf_file_res {
    std::string repo; // repo name with ":tag" removed
    std::string ggufFile;
    std::string mmprojFile;
};

#ifdef LLAMA_USE_CURL

bool common_has_curl() {
    return true;
}

#ifdef __linux__
#include <linux/limits.h>
#elif defined(_WIN32)
#   if !defined(PATH_MAX)
#       define PATH_MAX MAX_PATH
#   endif
#elif defined(_AIX)
#include <sys/limits.h>
#else
#include <sys/syslimits.h>
#endif
#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083

//
// CURL utils
//

using curl_ptr = std::unique_ptr<CURL, decltype(&curl_easy_cleanup)>;

// cannot use unique_ptr for curl_slist, because we cannot update without destroying the old one
struct curl_slist_ptr {
    struct curl_slist * ptr = nullptr;
    ~curl_slist_ptr() {
        if (ptr) {
            curl_slist_free_all(ptr);
        }
    }
};

#define CURL_MAX_RETRY           3
#define CURL_RETRY_DELAY_SECONDS 2
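
// Note (added for illustration): curl_perform_with_retry below computes the
// delay as retry_delay_seconds ^ (attempt - 1) * 1000 ms. With the defaults
// above (3 attempts, base 2), a persistently failing transfer sleeps 1000 ms
// after the first attempt and 2000 ms after the second, then gives up.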
static bool curl_perform_with_retry(const std::string & url, CURL * curl, int max_attempts, int retry_delay_seconds, const char * method_name) {
    int remaining_attempts = max_attempts;

    while (remaining_attempts > 0) {
        LOG_INF("%s: %s %s (attempt %d of %d)...\n", __func__, method_name, url.c_str(), max_attempts - remaining_attempts + 1, max_attempts);

        CURLcode res = curl_easy_perform(curl);
        if (res == CURLE_OK) {
            return true;
        }

        int exponential_backoff_delay = std::pow(retry_delay_seconds, max_attempts - remaining_attempts) * 1000;
        LOG_WRN("%s: curl_easy_perform() failed: %s, retrying after %d milliseconds...\n", __func__, curl_easy_strerror(res), exponential_backoff_delay);

        remaining_attempts--;
        if (remaining_attempts == 0) break;
        std::this_thread::sleep_for(std::chrono::milliseconds(exponential_backoff_delay));
    }

    LOG_ERR("%s: curl_easy_perform() failed after %d attempts\n", __func__, max_attempts);

    return false;
}
// download one single file from remote URL to local path
static bool common_download_file_single(const std::string & url, const std::string & path, const std::string & bearer_token, bool offline) {
    // Check if the file already exists locally
    auto file_exists = std::filesystem::exists(path);

    // If the file exists, check its JSON metadata companion file.
    std::string metadata_path = path + ".json";
    nlohmann::json metadata; // TODO @ngxson : get rid of this json, use regex instead
    std::string etag;
    std::string last_modified;

    if (file_exists) {
        if (offline) {
            LOG_INF("%s: using cached file (offline mode): %s\n", __func__, path.c_str());
            return true; // skip verification/downloading
        }
        // Try and read the JSON metadata file (note: stream autoclosed upon exiting this block).
        std::ifstream metadata_in(metadata_path);
        if (metadata_in.good()) {
            try {
                metadata_in >> metadata;
                LOG_DBG("%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(), metadata.dump().c_str());
                if (metadata.contains("etag") && metadata.at("etag").is_string()) {
                    etag = metadata.at("etag");
                }
                if (metadata.contains("lastModified") && metadata.at("lastModified").is_string()) {
                    last_modified = metadata.at("lastModified");
                }
            } catch (const nlohmann::json::exception & e) {
                LOG_ERR("%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what());
            }
        }
        // if we cannot open the metadata file, we assume that the downloaded file is not valid (etag and last-modified are left empty, so we will download it again)
    } else {
        if (offline) {
            LOG_ERR("%s: required file is not available in cache (offline mode): %s\n", __func__, path.c_str());
            return false;
        }
        LOG_INF("%s: no previous model file found %s\n", __func__, path.c_str());
    }

    // Send a HEAD request to retrieve the etag and last-modified headers
    struct common_load_model_from_url_headers {
        std::string etag;
        std::string last_modified;
    };
    common_load_model_from_url_headers headers;
    bool head_request_ok = false;
    bool should_download = !file_exists; // by default, we should download if the file does not exist

    // Initialize libcurl
    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
    curl_slist_ptr http_headers;
    if (!curl) {
        LOG_ERR("%s: error initializing libcurl\n", __func__);
        return false;
    }

    // Set the URL, allow to follow http redirection
    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);

    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
    // Check if hf-token or bearer-token was specified
    if (!bearer_token.empty()) {
        std::string auth_header = "Authorization: Bearer " + bearer_token;
        http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
    }
    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);

#if defined(_WIN32)
    // CURLSSLOPT_NATIVE_CA tells libcurl to use the standard certificate store of the
    // operating system. Currently implemented under MS-Windows.
    curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif

    typedef size_t(*CURLOPT_HEADERFUNCTION_PTR)(char *, size_t, size_t, void *);
    auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
        common_load_model_from_url_headers * headers = (common_load_model_from_url_headers *) userdata;

        static std::regex header_regex("([^:]+): (.*)\r\n");
        static std::regex etag_regex("ETag", std::regex_constants::icase);
        static std::regex last_modified_regex("Last-Modified", std::regex_constants::icase);

        std::string header(buffer, n_items);
        std::smatch match;
        if (std::regex_match(header, match, header_regex)) {
            const std::string & key   = match[1];
            const std::string & value = match[2];
            if (std::regex_match(key, match, etag_regex)) {
                headers->etag = value;
            } else if (std::regex_match(key, match, last_modified_regex)) {
                headers->last_modified = value;
            }
        }
        return n_items;
    };

    curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 1L);     // will trigger the HEAD verb
    curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); // hide head request progress
    curl_easy_setopt(curl.get(), CURLOPT_HEADERFUNCTION, static_cast<CURLOPT_HEADERFUNCTION_PTR>(header_callback));
    curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &headers);

    // we only allow retrying once for HEAD requests
    // this is for the use case of running offline (no internet), where retrying can be annoying
    bool was_perform_successful = curl_perform_with_retry(url, curl.get(), 1, 0, "HEAD");
    if (!was_perform_successful) {
        head_request_ok = false;
    }

    long http_code = 0;
    curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
    if (http_code == 200) {
        head_request_ok = true;
    } else {
        LOG_WRN("%s: HEAD invalid http status code received: %ld\n", __func__, http_code);
        head_request_ok = false;
    }

    // if head_request_ok is false, we don't have the etag or last-modified headers
    // we leave should_download as-is, which is true if the file does not exist
    if (head_request_ok) {
        // check if ETag or Last-Modified headers are different
        // if so, we need to download the file again
        if (!etag.empty() && etag != headers.etag) {
            LOG_WRN("%s: ETag header is different (%s != %s): triggering a new download\n", __func__, etag.c_str(), headers.etag.c_str());
            should_download = true;
        } else if (!last_modified.empty() && last_modified != headers.last_modified) {
            LOG_WRN("%s: Last-Modified header is different (%s != %s): triggering a new download\n", __func__, last_modified.c_str(), headers.last_modified.c_str());
            should_download = true;
        }
    }

    if (should_download) {
        std::string path_temporary = path + ".downloadInProgress";
        if (file_exists) {
            LOG_WRN("%s: deleting previous downloaded file: %s\n", __func__, path.c_str());
            if (remove(path.c_str()) != 0) {
                LOG_ERR("%s: unable to delete file: %s\n", __func__, path.c_str());
                return false;
            }
        }

        // Set the output file
        struct FILE_deleter {
            void operator()(FILE * f) const {
                fclose(f);
            }
        };
        std::unique_ptr<FILE, FILE_deleter> outfile(fopen(path_temporary.c_str(), "wb"));
        if (!outfile) {
            LOG_ERR("%s: error opening local file for writing: %s\n", __func__, path.c_str());
            return false;
        }

        typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * data, size_t size, size_t nmemb, void * fd);
        auto write_callback = [](void * data, size_t size, size_t nmemb, void * fd) -> size_t {
            return fwrite(data, size, nmemb, (FILE *) fd);
        };
        curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 0L);
        curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
        curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, outfile.get());

        // display download progress
        curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 0L);

        // helper function to hide password in URL
        auto llama_download_hide_password_in_url = [](const std::string & url) -> std::string {
            std::size_t protocol_pos = url.find("://");
            if (protocol_pos == std::string::npos) {
                return url; // Malformed URL
            }
            std::size_t at_pos = url.find('@', protocol_pos + 3);
            if (at_pos == std::string::npos) {
                return url; // No password in URL
            }
            // e.g. "https://user:secret@host/file" -> "https://********@host/file"
            return url.substr(0, protocol_pos + 3) + "********" + url.substr(at_pos);
        };

        // start the download
        LOG_INF("%s: trying to download model from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
            llama_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str());
        bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS, "GET");
        if (!was_perform_successful) {
            return false;
        }

        long http_code = 0;
        curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
        if (http_code < 200 || http_code >= 400) {
            LOG_ERR("%s: invalid http status code received: %ld\n", __func__, http_code);
            return false;
        }

        // Causes file to be closed explicitly here before we rename it.
        outfile.reset();

        // Write the updated JSON metadata file.
        metadata.update({
            {"url", url},
            {"etag", headers.etag},
            {"lastModified", headers.last_modified}
        });
        write_file(metadata_path, metadata.dump(4));
        LOG_DBG("%s: file metadata saved: %s\n", __func__, metadata_path.c_str());

        if (rename(path_temporary.c_str(), path.c_str()) != 0) {
            LOG_ERR("%s: unable to rename file: %s to %s\n", __func__, path_temporary.c_str(), path.c_str());
            return false;
        }
    } else {
        LOG_INF("%s: using cached file: %s\n", __func__, path.c_str());
    }

    return true;
}
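
// For illustration (the keys follow the metadata.update() call above, the
// values are made up): the ".json" companion of a cached model file looks like
//   {
//       "url": "https://huggingface.co/<user>/<model>/resolve/main/model.gguf",
//       "etag": "\"abc123\"",
//       "lastModified": "Tue, 01 Jan 2025 00:00:00 GMT"
//   }
// and is what the HEAD response headers are compared against on the next run.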
// download multiple files from remote URLs to local paths
// the input is a vector of pairs <url, path>
static bool common_download_file_multiple(const std::vector<std::pair<std::string, std::string>> & urls, const std::string & bearer_token, bool offline) {
    // Prepare download in parallel
    std::vector<std::future<bool>> futures_download;
    for (auto const & item : urls) {
        futures_download.push_back(std::async(std::launch::async, [bearer_token, offline](const std::pair<std::string, std::string> & it) -> bool {
            return common_download_file_single(it.first, it.second, bearer_token, offline);
        }, item));
    }

    // Wait for all downloads to complete
    for (auto & f : futures_download) {
        if (!f.get()) {
            return false;
        }
    }

    return true;
}
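
// Example call (hypothetical URLs and paths, for illustration only):
//   common_download_file_multiple({
//       { "https://example.com/m-00002-of-00003.gguf", "/cache/m-00002-of-00003.gguf" },
//       { "https://example.com/m-00003-of-00003.gguf", "/cache/m-00003-of-00003.gguf" },
//   }, /*bearer_token=*/"", /*offline=*/false);
// Each pair is fetched on its own std::async task; the call returns false if
// any single download fails.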
static bool common_download_model(
        const common_params_model & model,
        const std::string & bearer_token,
        bool offline) {
    // Basic validation of the model.url
    if (model.url.empty()) {
        LOG_ERR("%s: invalid model url\n", __func__);
        return false;
    }

    if (!common_download_file_single(model.url, model.path, bearer_token, offline)) {
        return false;
    }

    // check for additional GGUFs split to download
    int n_split = 0;
    {
        struct gguf_init_params gguf_params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ NULL,
        };
        auto * ctx_gguf = gguf_init_from_file(model.path.c_str(), gguf_params);
        if (!ctx_gguf) {
            LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, model.path.c_str());
            return false;
        }

        auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_SPLIT_COUNT);
        if (key_n_split >= 0) {
            n_split = gguf_get_val_u16(ctx_gguf, key_n_split);
        }

        gguf_free(ctx_gguf);
    }

    if (n_split > 1) {
        char split_prefix[PATH_MAX] = {0};
        char split_url_prefix[LLAMA_CURL_MAX_URL_LENGTH] = {0};

        // Verify the first split file format
        // and extract split URL and PATH prefixes
        {
            if (!llama_split_prefix(split_prefix, sizeof(split_prefix), model.path.c_str(), 0, n_split)) {
                LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, model.path.c_str(), n_split);
                return false;
            }

            if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model.url.c_str(), 0, n_split)) {
                LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model.url.c_str(), n_split);
                return false;
            }
        }

        std::vector<std::pair<std::string, std::string>> urls;
        for (int idx = 1; idx < n_split; idx++) {
            char split_path[PATH_MAX] = {0};
            llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);

            char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
            llama_split_path(split_url, sizeof(split_url), split_url_prefix, idx, n_split);

            if (std::string(split_path) == model.path) {
                continue; // skip the already downloaded file
            }

            urls.push_back({split_url, split_path});
        }

        // Download in parallel
        common_download_file_multiple(urls, bearer_token, offline);
    }

    return true;
}
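
// Illustrative split naming (assuming the usual "<prefix>-%05d-of-%05d.gguf"
// scheme produced by llama_split_path; the model name is made up): a 3-way
// split of "my-model" downloads
//   my-model-00001-of-00003.gguf   (fetched above as model.path)
//   my-model-00002-of-00003.gguf
//   my-model-00003-of-00003.gguf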
std::pair<long, std::vector<char>> common_remote_get_content(const std::string & url, const common_remote_params & params) {
    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
    curl_slist_ptr http_headers;
    std::vector<char> res_buffer;

    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L);
    curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);
    typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data);
    auto write_callback = [](void * ptr, size_t size, size_t nmemb, void * data) -> size_t {
        auto data_vec = static_cast<std::vector<char> *>(data);
        data_vec->insert(data_vec->end(), (char *) ptr, (char *) ptr + size * nmemb);
        return size * nmemb;
    };
    curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
    curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &res_buffer);
#if defined(_WIN32)
    curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif
    if (params.timeout > 0) {
        curl_easy_setopt(curl.get(), CURLOPT_TIMEOUT, params.timeout);
    }
    if (params.max_size > 0) {
        curl_easy_setopt(curl.get(), CURLOPT_MAXFILESIZE, params.max_size);
    }
    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
    for (const auto & header : params.headers) {
        http_headers.ptr = curl_slist_append(http_headers.ptr, header.c_str());
    }
    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);

    CURLcode res = curl_easy_perform(curl.get());

    if (res != CURLE_OK) {
        std::string error_msg = curl_easy_strerror(res);
        throw std::runtime_error("error: cannot make GET request: " + error_msg);
    }

    long res_code;
    curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &res_code);

    return { res_code, std::move(res_buffer) };
}
/**
 * Allow getting the HF file from the HF repo with tag (like ollama), for example:
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:q4
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:q5_k_s
 * Tag is optional, defaults to "latest" (meaning it checks for Q4_K_M first, then Q4, then if not found, returns the first GGUF file in repo)
 *
 * Return pair of <repo, file> (with "repo" already having tag removed)
 *
 * Note: we use the Ollama-compatible HF API, but not using the blobId. Instead, we use the special "ggufFile" field which returns the value for "hf_file". This is done to be backward-compatible with existing cache files.
 */
static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_with_tag, const std::string & bearer_token, bool offline) {
    auto parts = string_split<std::string>(hf_repo_with_tag, ':');
    std::string tag = parts.size() > 1 ? parts.back() : "latest";
    std::string hf_repo = parts[0];
    if (string_split<std::string>(hf_repo, '/').size() != 2) {
        throw std::invalid_argument("error: invalid HF repo format, expected <user>/<model>[:quant]\n");
    }

    std::string url = get_model_endpoint() + "v2/" + hf_repo + "/manifests/" + tag;

    // headers
    std::vector<std::string> headers;
    headers.push_back("Accept: application/json");
    if (!bearer_token.empty()) {
        headers.push_back("Authorization: Bearer " + bearer_token);
    }
    // Important: the User-Agent must be "llama-cpp" to get the "ggufFile" field in the response
    // User-Agent header is already set in common_remote_get_content, no need to set it here

    // we use "=" to avoid clashing with other components, while still being allowed on Windows
    std::string cached_response_fname = "manifest=" + hf_repo + "=" + tag + ".json";
    string_replace_all(cached_response_fname, "/", "_");
    std::string cached_response_path = fs_get_cache_file(cached_response_fname);

    // make the request
    common_remote_params params;
    params.headers = headers;
    long res_code = 0;
    std::string res_str;
    bool use_cache = false;
    if (!offline) {
        try {
            auto res = common_remote_get_content(url, params);
            res_code = res.first;
            res_str  = std::string(res.second.data(), res.second.size());
        } catch (const std::exception & e) {
            LOG_WRN("error: failed to get manifest at %s: %s\n", url.c_str(), e.what());
        }
    }
    if (res_code == 0) {
        if (std::filesystem::exists(cached_response_path)) {
            LOG_WRN("trying to read manifest from cache: %s\n", cached_response_path.c_str());
            res_str   = read_file(cached_response_path);
            res_code  = 200;
            use_cache = true;
        } else {
            throw std::runtime_error(
                offline ? "error: failed to get manifest (offline mode)"
                        : "error: failed to get manifest (check your internet connection)");
        }
    }

    std::string ggufFile;
    std::string mmprojFile;

    if (res_code == 200 || res_code == 304) {
        // extract ggufFile.rfilename in json, using regex
        {
            std::regex pattern("\"ggufFile\"[\\s\\S]*?\"rfilename\"\\s*:\\s*\"([^\"]+)\"");
            std::smatch match;
            if (std::regex_search(res_str, match, pattern)) {
                ggufFile = match[1].str();
            }
        }
        // extract mmprojFile.rfilename in json, using regex
        {
            std::regex pattern("\"mmprojFile\"[\\s\\S]*?\"rfilename\"\\s*:\\s*\"([^\"]+)\"");
            std::smatch match;
            if (std::regex_search(res_str, match, pattern)) {
                mmprojFile = match[1].str();
            }
        }
        if (!use_cache) {
            // if not using cached response, update the cache file
            write_file(cached_response_path, res_str);
        }
    } else if (res_code == 401) {
        throw std::runtime_error("error: model is private or does not exist; if you are accessing a gated model, please provide a valid HF token");
    } else {
        throw std::runtime_error(string_format("error from HF API, response code: %ld, data: %s", res_code, res_str.c_str()));
    }

    // check response
    if (ggufFile.empty()) {
        throw std::runtime_error("error: model does not have ggufFile");
    }

    return { hf_repo, ggufFile, mmprojFile };
}
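
// For illustration (repo name borrowed from the doc comment above): the
// manifest for "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M" is cached as
//   manifest=bartowski_Llama-3.2-3B-Instruct-GGUF=Q4_K_M.json
// inside the llama.cpp cache dir -- "/" is replaced by "_" so the name is a
// single flat file, and "=" keeps it from clashing with model filenames.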
#else

bool common_has_curl() {
    return false;
}

static bool common_download_file_single(const std::string &, const std::string &, const std::string &, bool) {
    LOG_ERR("error: built without CURL, cannot download model from internet\n");
    return false;
}

static bool common_download_file_multiple(const std::vector<std::pair<std::string, std::string>> &, const std::string &, bool) {
    LOG_ERR("error: built without CURL, cannot download model from the internet\n");
    return false;
}

static bool common_download_model(
        const common_params_model &,
        const std::string &,
        bool) {
    LOG_ERR("error: built without CURL, cannot download model from the internet\n");
    return false;
}

static struct common_hf_file_res common_get_hf_file(const std::string &, const std::string &, bool) {
    LOG_ERR("error: built without CURL, cannot download model from the internet\n");
    return {};
}

std::pair<long, std::vector<char>> common_remote_get_content(const std::string & url, const common_remote_params &) {
    if (!url.empty()) {
        throw std::runtime_error("error: built without CURL, cannot download model from the internet");
    }

    return {};
}

#endif // LLAMA_USE_CURL
//
// utils
//

// Helper function to parse tensor buffer override strings
static void parse_tensor_buffer_overrides(const std::string & value, std::vector<llama_model_tensor_buft_override> & overrides) {
    std::map<std::string, ggml_backend_buffer_type_t> buft_list;
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
        auto * dev  = ggml_backend_dev_get(i);
        auto * buft = ggml_backend_dev_buffer_type(dev);
        if (buft) {
            buft_list[ggml_backend_buft_name(buft)] = buft;
        }
    }

    for (const auto & override : string_split<std::string>(value, ',')) {
        std::string::size_type pos = override.find('=');
        if (pos == std::string::npos) {
            throw std::invalid_argument("invalid value");
        }
        std::string tensor_name = override.substr(0, pos);
        std::string buffer_type = override.substr(pos + 1);

        if (buft_list.find(buffer_type) == buft_list.end()) {
            printf("Available buffer types:\n");
            for (const auto & it : buft_list) {
                printf("  %s\n", ggml_backend_buft_name(it.second));
            }
            throw std::invalid_argument("unknown buffer type");
        }
        // keep strings alive and avoid leaking memory by storing them in a static list
        // (std::list does not invalidate pointers to existing elements on push_back)
        static std::list<std::string> buft_overrides;
        buft_overrides.push_back(tensor_name);
        overrides.push_back({buft_overrides.back().c_str(), buft_list.at(buffer_type)});
    }
}
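
// Example value this parser accepts (the tensor name and buffer type are
// illustrative; available buffer types depend on the backends compiled in):
//   "token_embd.weight=CPU"
// i.e. a comma-separated list of <tensor-name>=<buffer-type> pairs; an unknown
// buffer type prints the available ones and throws.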
struct handle_model_result {
    bool found_mmproj = false;
    common_params_model mmproj;
};

static handle_model_result common_params_handle_model(
        struct common_params_model & model,
        const std::string & bearer_token,
        const std::string & model_path_default,
        bool offline) {
    handle_model_result result;
    // handle pre-fill default model path and url based on hf_repo and hf_file
    {
        if (!model.hf_repo.empty()) {
            // short-hand to avoid specifying --hf-file -> default it to --model
            if (model.hf_file.empty()) {
                if (model.path.empty()) {
                    auto auto_detected = common_get_hf_file(model.hf_repo, bearer_token, offline);
                    if (auto_detected.repo.empty() || auto_detected.ggufFile.empty()) {
                        exit(1); // built without CURL, error message already printed
                    }
                    model.hf_repo = auto_detected.repo;
                    model.hf_file = auto_detected.ggufFile;
                    if (!auto_detected.mmprojFile.empty()) {
                        result.found_mmproj   = true;
                        result.mmproj.hf_repo = model.hf_repo;
                        result.mmproj.hf_file = auto_detected.mmprojFile;
                    }
                } else {
                    model.hf_file = model.path;
                }
            }

            std::string model_endpoint = get_model_endpoint();
            model.url = model_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
            // make sure model path is present (for caching purposes)
            if (model.path.empty()) {
                // this is to avoid different repos having the same file name, or the same file name in different subdirs
                std::string filename = model.hf_repo + "_" + model.hf_file;
                // to make sure we don't have any slashes in the filename
                string_replace_all(filename, "/", "_");
                model.path = fs_get_cache_file(filename);
            }

        } else if (!model.url.empty()) {
            if (model.path.empty()) {
                auto f = string_split<std::string>(model.url, '#').front();
                f = string_split<std::string>(f, '?').front();
                model.path = fs_get_cache_file(string_split<std::string>(f, '/').back());
            }

        } else if (model.path.empty()) {
            model.path = model_path_default;
        }
    }

    // then, download it if needed
    if (!model.url.empty()) {
        bool ok = common_download_model(model, bearer_token, offline);
        if (!ok) {
            LOG_ERR("error: failed to download model from %s\n", model.url.c_str());
            exit(1);
        }
    }

    return result;
}
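
// Cache-path derivation, for illustration (repo and file names are made up):
// with hf_repo = "user/my-model-GGUF" and hf_file = "my-model-Q4_K_M.gguf",
// the local path becomes
//   <cache>/user_my-model-GGUF_my-model-Q4_K_M.gguf
// so files with the same name from different repos (or subdirs) never collide.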
const std::vector<ggml_type> kv_cache_types = {
    GGML_TYPE_F32,
    GGML_TYPE_F16,
    GGML_TYPE_BF16,
    GGML_TYPE_Q8_0,
    GGML_TYPE_Q4_0,
    GGML_TYPE_Q4_1,
    GGML_TYPE_IQ4_NL,
    GGML_TYPE_Q5_0,
    GGML_TYPE_Q5_1,
};

static ggml_type kv_cache_type_from_str(const std::string & s) {
    for (const auto & type : kv_cache_types) {
        if (ggml_type_name(type) == s) {
            return type;
        }
    }
    throw std::runtime_error("Unsupported cache type: " + s);
}

static std::string get_all_kv_cache_types() {
    std::ostringstream msg;
    for (const auto & type : kv_cache_types) {
        msg << ggml_type_name(type) << (&type == &kv_cache_types.back() ? "" : ", ");
    }
    return msg.str();
}
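
// Assuming the usual lowercase ggml type names, get_all_kv_cache_types()
// renders the list above roughly as
//   "f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1"
// which is what kv_cache_type_from_str() matches a user-supplied string against.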
//
// CLI argument parsing functions
//

static bool common_params_parse_ex(int argc, char ** argv, common_params_context & ctx_arg) {
    std::string arg;
    const std::string arg_prefix = "--";
    common_params & params = ctx_arg.params;

    std::unordered_map<std::string, common_arg *> arg_to_options;
    for (auto & opt : ctx_arg.options) {
        for (const auto & arg : opt.args) {
            arg_to_options[arg] = &opt;
        }
    }

    // handle environment variables
    for (auto & opt : ctx_arg.options) {
        std::string value;
        if (opt.get_value_from_env(value)) {
            try {
                if (opt.handler_void && (value == "1" || value == "true")) {
                    opt.handler_void(params);
                }
                if (opt.handler_int) {
                    opt.handler_int(params, std::stoi(value));
                }
                if (opt.handler_string) {
                    opt.handler_string(params, value);
                    continue;
                }
            } catch (std::exception & e) {
                throw std::invalid_argument(string_format(
                    "error while handling environment variable \"%s\": %s\n\n", opt.env, e.what()));
            }
        }
    }

    // handle command line arguments
    auto check_arg = [&](int i) {
        if (i+1 >= argc) {
            throw std::invalid_argument("expected value for argument");
        }
    };

    for (int i = 1; i < argc; i++) {
        const std::string arg_prefix = "--";

        std::string arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }
        if (arg_to_options.find(arg) == arg_to_options.end()) {
            throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
        }
        auto opt = *arg_to_options[arg];
        if (opt.has_value_from_env()) {
            fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
        }
        try {
            if (opt.handler_void) {
                opt.handler_void(params);
                continue;
            }

            // arg with single value
            check_arg(i);
            std::string val = argv[++i];
            if (opt.handler_int) {
                opt.handler_int(params, std::stoi(val));
                continue;
            }
            if (opt.handler_string) {
                opt.handler_string(params, val);
                continue;
            }

            // arg with 2 values
            check_arg(i);
            std::string val2 = argv[++i];
            if (opt.handler_str_str) {
                opt.handler_str_str(params, val, val2);
                continue;
            }
        } catch (std::exception & e) {
            throw std::invalid_argument(string_format(
                "error while handling argument \"%s\": %s\n\n"
                "usage:\n%s\n\nto show complete usage, run with -h",
                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
        }
    }

    postprocess_cpu_params(params.cpuparams,                   nullptr);
    postprocess_cpu_params(params.cpuparams_batch,             &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams,       &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams_batch, &params.cpuparams_batch);

    if (params.prompt_cache_all && (params.interactive || params.interactive_first)) {
        throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
    }

    // handle model and download
    {
        auto res = common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH, params.offline);
        if (params.no_mmproj) {
            params.mmproj = {};
        } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) {
            // optionally, handle mmproj model when -hf is specified
            params.mmproj = res.mmproj;
        }
        // only download mmproj if the current example is using it
        for (auto & ex : mmproj_examples) {
            if (ctx_arg.ex == ex) {
                common_params_handle_model(params.mmproj, params.hf_token, "", params.offline);
                break;
            }
        }
        common_params_handle_model(params.speculative.model, params.hf_token, "", params.offline);
        common_params_handle_model(params.vocoder.model,     params.hf_token, "", params.offline);
    }

    if (params.escape) {
        string_process_escapes(params.prompt);
        string_process_escapes(params.input_prefix);
        string_process_escapes(params.input_suffix);
        for (auto & antiprompt : params.antiprompt) {
            string_process_escapes(antiprompt);
        }
        for (auto & seq_breaker : params.sampling.dry_sequence_breakers) {
            string_process_escapes(seq_breaker);
        }
        for (auto & pair : params.speculative.replacements) {
            string_process_escapes(pair.first);
            string_process_escapes(pair.second);
        }
    }

    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
    }

    if (!params.tensor_buft_overrides.empty()) {
        params.tensor_buft_overrides.push_back({nullptr, nullptr});
    }
    if (!params.speculative.tensor_buft_overrides.empty()) {
        params.speculative.tensor_buft_overrides.push_back({nullptr, nullptr});
    }

    if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) {
        throw std::runtime_error(string_format(
            "error: the supplied chat template is not supported: %s%s\n",
            params.chat_template.c_str(),
            params.use_jinja ? "" : "\nnote: llama.cpp was started without --jinja, we only support commonly used templates"
        ));
    }

    return true;
}
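
// Parsing notes (illustrative): a flag written as "--ctx_size" is accepted as
// "--ctx-size" because '_' is normalized to '-' for "--"-prefixed args above;
// and the {nullptr, nullptr} / key[0] = 0 entries appended at the end act as
// sentinel terminators for the C-style override arrays consumed downstream.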
static void common_params_print_usage(common_params_context & ctx_arg) {
    auto print_options = [](std::vector<common_arg *> & options) {
        for (common_arg * opt : options) {
            printf("%s", opt->to_string().c_str());
        }
    };

    std::vector<common_arg *> common_options;
    std::vector<common_arg *> sparam_options;
    std::vector<common_arg *> specific_options;
    for (auto & opt : ctx_arg.options) {
        // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching current example
        if (opt.is_sparam) {
            sparam_options.push_back(&opt);
        } else if (opt.in_example(ctx_arg.ex)) {
            specific_options.push_back(&opt);
        } else {
            common_options.push_back(&opt);
        }
    }
    printf("----- common params -----\n\n");
    print_options(common_options);
    printf("\n\n----- sampling params -----\n\n");
    print_options(sparam_options);
    // TODO: maybe convert enum llama_example to string
    printf("\n\n----- example-specific params -----\n\n");
    print_options(specific_options);
}
static void common_params_print_completion(common_params_context & ctx_arg) {
    std::vector<common_arg *> common_options;
    std::vector<common_arg *> sparam_options;
    std::vector<common_arg *> specific_options;

    for (auto & opt : ctx_arg.options) {
        if (opt.is_sparam) {
            sparam_options.push_back(&opt);
        } else if (opt.in_example(ctx_arg.ex)) {
            specific_options.push_back(&opt);
        } else {
            common_options.push_back(&opt);
        }
    }

    printf("_llama_completions() {\n");
    printf("    local cur prev opts\n");
    printf("    COMPREPLY=()\n");
    printf("    cur=\"${COMP_WORDS[COMP_CWORD]}\"\n");
    printf("    prev=\"${COMP_WORDS[COMP_CWORD-1]}\"\n\n");

    printf("    opts=\"");
    auto print_options = [](const std::vector<common_arg *> & options) {
        for (const common_arg * opt : options) {
            for (const char * arg : opt->args) {
                printf("%s ", arg);
            }
        }
    };

    print_options(common_options);
    print_options(sparam_options);
    print_options(specific_options);
    printf("\"\n\n");

    printf("    case \"$prev\" in\n");
    printf("        --model)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        --grammar-file)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.gbnf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        --chat-template-file)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.jinja' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        *)\n");
    printf("            COMPREPLY=( $(compgen -W \"${opts}\" -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("    esac\n");
    printf("}\n\n");

    std::set<std::string> executables = {
        "llama-batched",
        "llama-batched-bench",
        "llama-bench",
        "llama-cli",
        "llama-convert-llama2c-to-ggml",
        "llama-cvector-generator",
        "llama-embedding",
        "llama-eval-callback",
        "llama-export-lora",
        "llama-gen-docs",
        "llama-gguf",
        "llama-gguf-hash",
        "llama-gguf-split",
        "llama-gritlm",
        "llama-imatrix",
        "llama-infill",
        "llama-mtmd-cli",
        "llama-llava-clip-quantize-cli",
        "llama-lookahead",
        "llama-lookup",
        "llama-lookup-create",
        "llama-lookup-merge",
        "llama-lookup-stats",
        "llama-parallel",
        "llama-passkey",
        "llama-perplexity",
        "llama-q8dot",
        "llama-quantize",
        "llama-qwen2vl-cli",
        "llama-retrieval",
        "llama-run",
        "llama-save-load-state",
        "llama-server",
        "llama-simple",
        "llama-simple-chat",
        "llama-speculative",
        "llama-speculative-simple",
        "llama-tokenize",
        "llama-tts",
        "llama-vdot"
    };

    for (const auto & exe : executables) {
        printf("complete -F _llama_completions %s\n", exe.c_str());
    }
}
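
// Usage note (illustrative shell snippet, assuming the completion flag that
// triggers this function is --completion-bash): the generated script is meant
// to be sourced into the current shell, e.g.
//   llama-cli --completion-bash > ~/.llama-completion.bash
//   source ~/.llama-completion.bash
// after which <Tab> completes flags and .gguf paths for the listed executables.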
static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
    std::vector<ggml_backend_dev_t> devices;
    auto dev_names = string_split<std::string>(value, ',');
    if (dev_names.empty()) {
        throw std::invalid_argument("no devices specified");
    }
    if (dev_names.size() == 1 && dev_names[0] == "none") {
        devices.push_back(nullptr);
    } else {
        for (const auto & device : dev_names) {
            auto * dev = ggml_backend_dev_by_name(device.c_str());
            if (!dev || ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_GPU) {
                throw std::invalid_argument(string_format("invalid device: %s", device.c_str()));
            }
            devices.push_back(dev);
        }
        devices.push_back(nullptr);
    }
    return devices;
}
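
// Register RPC devices given as a comma-separated list of endpoints.
// The RPC backend exposes its device-add function through
// ggml_backend_reg_get_proc_address(), so it is looked up dynamically here.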
static void add_rpc_devices(std::string servers) {
    auto rpc_servers = string_split<std::string>(servers, ',');
    if (rpc_servers.empty()) {
        throw std::invalid_argument("no RPC servers specified");
    }
    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
    if (!rpc_reg) {
        throw std::invalid_argument("failed to find RPC backend");
    }
    typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
    ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
    if (!ggml_backend_rpc_add_device_fn) {
        throw std::invalid_argument("failed to find RPC device add function");
    }
    for (const auto & server : rpc_servers) {
        ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
        if (dev) {
            ggml_backend_device_register(dev);
        } else {
            throw std::invalid_argument("failed to register RPC device");
        }
    }
}
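
// Top-level entry point used by the examples: parses argv into params,
// restoring the caller's defaults if parsing fails, and handles the
// --help/--usage and --completion-bash actions (both exit the process).
// Typical call site (illustrative sketch, not copied from a specific tool):
//
//   common_params params;
//   if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
//       return 1;
//   }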
bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
    auto ctx_arg = common_params_parser_init(params, ex, print_usage);
    const common_params params_org = ctx_arg.params; // the example can modify the default params

    try {
        if (!common_params_parse_ex(argc, argv, ctx_arg)) {
            ctx_arg.params = params_org;
            return false;
        }
        if (ctx_arg.params.usage) {
            common_params_print_usage(ctx_arg);
            if (ctx_arg.print_usage) {
                ctx_arg.print_usage(argc, argv);
            }
            exit(0);
        }
        if (ctx_arg.params.completion) {
            common_params_print_completion(ctx_arg);
            exit(0);
        }
    } catch (const std::invalid_argument & ex) {
        fprintf(stderr, "%s\n", ex.what());
        ctx_arg.params = params_org;
        return false;
    } catch (std::exception & ex) {
        fprintf(stderr, "%s\n", ex.what());
        exit(1); // for other exceptions, we exit with status code 1
    }

    return true;
}
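
// Build a comma-separated list of the chat templates built into libllama.
// llama_chat_builtin_templates() follows the usual two-call pattern: a first
// call with a null buffer returns the count, the second call fills the buffer.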
static std::string list_builtin_chat_templates() {
    std::vector<const char *> supported_tmpl;
    int32_t res = llama_chat_builtin_templates(nullptr, 0);
    supported_tmpl.resize(res);
    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
    std::ostringstream msg;
    for (auto & tmpl : supported_tmpl) {
        msg << tmpl << (&tmpl == &supported_tmpl.back() ? "" : ", ");
    }
    return msg.str();
}
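
// Construct the argument-parser context: loads dynamic backends, derives the
// default sampler-name strings used in help text, and registers every option
// that applies to the current example.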
common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
    // load dynamic backends
    ggml_backend_load_all();

    common_params_context ctx_arg(params);
    ctx_arg.print_usage = print_usage;
    ctx_arg.ex = ex;

    std::string sampler_type_chars;
    std::string sampler_type_names;
    for (const auto & sampler : params.sampling.samplers) {
        sampler_type_chars += common_sampler_type_to_chr(sampler);
        sampler_type_names += common_sampler_type_to_str(sampler) + ";";
    }
    sampler_type_names.pop_back();

    /**
     * filter options by example
     * rules:
     * - all examples inherit options from LLAMA_EXAMPLE_COMMON
     * - if LLAMA_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example
     * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
     */
    auto add_opt = [&](common_arg arg) {
        if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
            ctx_arg.options.push_back(std::move(arg));
        }
    };
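
    // For instance, an option registered with set_examples({LLAMA_EXAMPLE_MAIN})
    // is kept only when ex == LLAMA_EXAMPLE_MAIN, while one registered with
    // set_excludes({LLAMA_EXAMPLE_SERVER}) is dropped for the server tool;
    // everything else is inherited from LLAMA_EXAMPLE_COMMON.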
    add_opt(common_arg(
        {"-h", "--help", "--usage"},
        "print usage and exit",
        [](common_params & params) {
            params.usage = true;
        }
    ));
    add_opt(common_arg(
        {"--version"},
        "show version and build info",
        [](common_params &) {
            fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
            fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
            exit(0);
        }
    ));
    add_opt(common_arg(
        {"--completion-bash"},
        "print source-able bash completion script for llama.cpp",
        [](common_params & params) {
            params.completion = true;
        }
    ));
    add_opt(common_arg(
        {"--verbose-prompt"},
        string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),
        [](common_params & params) {
            params.verbose_prompt = true;
        }
    ));
    add_opt(common_arg(
        {"--no-display-prompt"},
        string_format("don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false"),
        [](common_params & params) {
            params.display_prompt = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-co", "--color"},
        string_format("colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? "true" : "false"),
        [](common_params & params) {
            params.use_color = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-t", "--threads"}, "N",
        string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
        [](common_params & params, int value) {
            params.cpuparams.n_threads = value;
            if (params.cpuparams.n_threads <= 0) {
                params.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_env("LLAMA_ARG_THREADS"));
    add_opt(common_arg(
        {"-tb", "--threads-batch"}, "N",
        "number of threads to use during batch and prompt processing (default: same as --threads)",
        [](common_params & params, int value) {
            params.cpuparams_batch.n_threads = value;
            if (params.cpuparams_batch.n_threads <= 0) {
                params.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ));
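    // CPU affinity and scheduling options; the *-batch variants apply to
    // batch/prompt processing and default to the generation settings.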
    add_opt(common_arg(
        {"-C", "--cpu-mask"}, "M",
        "CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: \"\")",
        [](common_params & params, const std::string & mask) {
            params.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ));
    add_opt(common_arg(
        {"-Cr", "--cpu-range"}, "lo-hi",
        "range of CPUs for affinity. Complements --cpu-mask",
        [](common_params & params, const std::string & range) {
            params.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ));
    add_opt(common_arg(
        {"--cpu-strict"}, "<0|1>",
        string_format("use strict CPU placement (default: %u)\n", (unsigned) params.cpuparams.strict_cpu),
        [](common_params & params, const std::string & value) {
            params.cpuparams.strict_cpu = std::stoul(value);
        }
    ));
    add_opt(common_arg(
        {"--prio"}, "N",
        string_format("set process/thread priority : low(-1), normal(0), medium(1), high(2), realtime(3) (default: %d)\n", params.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < GGML_SCHED_PRIO_LOW || prio > GGML_SCHED_PRIO_REALTIME) {
                throw std::invalid_argument("invalid value");
            }
            params.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ));
    add_opt(common_arg(
        {"--poll"}, "<0...100>",
        string_format("use polling level to wait for work (0 - no polling, default: %u)\n", (unsigned) params.cpuparams.poll),
        [](common_params & params, const std::string & value) {
            params.cpuparams.poll = std::stoul(value);
        }
    ));
    add_opt(common_arg(
        {"-Cb", "--cpu-mask-batch"}, "M",
        "CPU affinity mask: arbitrarily long hex. Complements cpu-range-batch (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ));
    add_opt(common_arg(
        {"-Crb", "--cpu-range-batch"}, "lo-hi",
        "ranges of CPUs for affinity. Complements --cpu-mask-batch",
        [](common_params & params, const std::string & range) {
            params.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ));
    add_opt(common_arg(
        {"--cpu-strict-batch"}, "<0|1>",
        "use strict CPU placement (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.cpuparams_batch.strict_cpu = value;
        }
    ));
    add_opt(common_arg(
        {"--prio-batch"}, "N",
        string_format("set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ));
    add_opt(common_arg(
        {"--poll-batch"}, "<0|1>",
        "use polling to wait for work (default: same as --poll)",
        [](common_params & params, int value) {
            params.cpuparams_batch.poll = value;
        }
    ));
    add_opt(common_arg(
        {"-lcs", "--lookup-cache-static"}, "FNAME",
        "path to static lookup cache to use for lookup decoding (not updated by generation)",
        [](common_params & params, const std::string & value) {
            params.lookup_cache_static = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-lcd", "--lookup-cache-dynamic"}, "FNAME",
        "path to dynamic lookup cache to use for lookup decoding (updated by generation)",
        [](common_params & params, const std::string & value) {
            params.lookup_cache_dynamic = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-c", "--ctx-size"}, "N",
        string_format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx),
        [](common_params & params, int value) {
            params.n_ctx = value;
        }
    ).set_env("LLAMA_ARG_CTX_SIZE"));
    add_opt(common_arg(
        {"-n", "--predict", "--n-predict"}, "N",
        string_format(
            ex == LLAMA_EXAMPLE_MAIN
                ? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
                : "number of tokens to predict (default: %d, -1 = infinity)",
            params.n_predict),
        [](common_params & params, int value) {
            params.n_predict = value;
        }
    ).set_env("LLAMA_ARG_N_PREDICT"));
    add_opt(common_arg(
        {"-b", "--batch-size"}, "N",
        string_format("logical maximum batch size (default: %d)", params.n_batch),
        [](common_params & params, int value) {
            params.n_batch = value;
        }
    ).set_env("LLAMA_ARG_BATCH"));
    add_opt(common_arg(
        {"-ub", "--ubatch-size"}, "N",
        string_format("physical maximum batch size (default: %d)", params.n_ubatch),
        [](common_params & params, int value) {
            params.n_ubatch = value;
        }
    ).set_env("LLAMA_ARG_UBATCH"));
    add_opt(common_arg(
        {"--keep"}, "N",
        string_format("number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep),
        [](common_params & params, int value) {
            params.n_keep = value;
        }
    ));
    add_opt(common_arg(
        {"--swa-full"},
        string_format("use full-size SWA cache (default: %s)\n"
            "[(more info)](https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)", params.swa_full ? "true" : "false"),
        [](common_params & params) {
            params.swa_full = true;
        }
    ).set_env("LLAMA_ARG_SWA_FULL"));
    add_opt(common_arg(
        {"--kv-unified", "-kvu"},
        string_format("use single unified KV buffer for the KV cache of all sequences (default: %s)\n"
            "[(more info)](https://github.com/ggml-org/llama.cpp/pull/14363)", params.kv_unified ? "true" : "false"),
        [](common_params & params) {
            params.kv_unified = true;
        }
    ).set_env("LLAMA_ARG_KV_SPLIT"));
    add_opt(common_arg(
        {"--no-context-shift"},
        string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
        [](common_params & params) {
            params.ctx_shift = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
    add_opt(common_arg(
        {"--chunks"}, "N",
        string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks),
        [](common_params & params, int value) {
            params.n_chunks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"-fa", "--flash-attn"},
        string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
        [](common_params & params) {
            params.flash_attn = true;
        }
    ).set_env("LLAMA_ARG_FLASH_ATTN"));
    add_opt(common_arg(
        {"-p", "--prompt"}, "PROMPT",
        "prompt to start generation with; for system message, use -sys",
        [](common_params & params, const std::string & value) {
            params.prompt = value;
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-sys", "--system-prompt"}, "PROMPT",
        "system prompt to use with model (if applicable, depending on chat template)",
        [](common_params & params, const std::string & value) {
            params.system_prompt = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--no-perf"},
        string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
        [](common_params & params) {
            params.no_perf = true;
            params.sampling.no_perf = true;
        }
    ).set_env("LLAMA_ARG_NO_PERF"));
    add_opt(common_arg(
        {"-f", "--file"}, "FNAME",
        "a file containing the prompt (default: none)",
        [](common_params & params, const std::string & value) {
            params.prompt = read_file(value);
            // store the external file name in params
            params.prompt_file = value;
            if (!params.prompt.empty() && params.prompt.back() == '\n') {
                params.prompt.pop_back();
            }
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-sysf", "--system-prompt-file"}, "FNAME",
        "a file containing the system prompt (default: none)",
        [](common_params & params, const std::string & value) {
            params.system_prompt = read_file(value);
            if (!params.system_prompt.empty() && params.system_prompt.back() == '\n') {
                params.system_prompt.pop_back();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-file"}, "FNAME",
        "an input file (repeat to specify multiple files)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.in_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"-bf", "--binary-file"}, "FNAME",
        "binary file containing the prompt (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            // store the external file name in params
            params.prompt_file = value;
            std::ostringstream ss;
            ss << file.rdbuf();
            params.prompt = ss.str();
            fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-e", "--escape"},
        string_format("process escape sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
        [](common_params & params) {
            params.escape = true;
        }
    ));
    add_opt(common_arg(
        {"--no-escape"},
        "do not process escape sequences",
        [](common_params & params) {
            params.escape = false;
        }
    ));
    add_opt(common_arg(
        {"-ptc", "--print-token-count"}, "N",
        string_format("print token count every N tokens (default: %d)", params.n_print),
        [](common_params & params, int value) {
            params.n_print = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache"}, "FNAME",
        "file to cache prompt state for faster startup (default: none)",
        [](common_params & params, const std::string & value) {
            params.path_prompt_cache = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache-all"},
        "if specified, saves user input and generations to cache as well\n",
        [](common_params & params) {
            params.prompt_cache_all = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache-ro"},
        "if specified, uses the prompt cache but does not update it",
        [](common_params & params) {
            params.prompt_cache_ro = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-r", "--reverse-prompt"}, "PROMPT",
        "halt generation at PROMPT, return control in interactive mode\n",
        [](common_params & params, const std::string & value) {
            params.antiprompt.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-sp", "--special"},
        string_format("special tokens output enabled (default: %s)", params.special ? "true" : "false"),
        [](common_params & params) {
            params.special = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-cnv", "--conversation"},
        "run in conversation mode:\n"
        "- does not print special tokens and suffix/prefix\n"
        "- interactive mode is also enabled\n"
        "(default: auto enabled if chat template is available)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-no-cnv", "--no-conversation"},
        "force disable conversation mode (default: false)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-st", "--single-turn"},
        "run conversation for a single turn only, then exit when done\n"
        "will not be interactive if first turn is predefined with --prompt\n"
        "(default: false)",
        [](common_params & params) {
            params.single_turn = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-i", "--interactive"},
        string_format("run in interactive mode (default: %s)", params.interactive ? "true" : "false"),
        [](common_params & params) {
            params.interactive = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-if", "--interactive-first"},
        string_format("run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false"),
        [](common_params & params) {
            params.interactive_first = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-mli", "--multiline-input"},
        "allows you to write or paste multiple lines without ending each in '\\'",
        [](common_params & params) {
            params.multiline_input = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-prefix-bos"},
        "prefix BOS to user inputs, preceding the `--in-prefix` string",
        [](common_params & params) {
            params.input_prefix_bos = true;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-prefix"}, "STRING",
        "string to prefix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_prefix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-suffix"}, "STRING",
        "string to suffix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_suffix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--no-warmup"},
        "skip warming up the model with an empty run",
        [](common_params & params) {
            params.warmup = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--spm-infill"},
        string_format(
            "use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: %s)",
            params.spm_infill ? "enabled" : "disabled"
        ),
        [](common_params & params) {
            params.spm_infill = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
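    // sampling parameters; set_sparam() groups these separately in the usage
    // output and in the generated bash completion.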
    add_opt(common_arg(
        {"--samplers"}, "SAMPLERS",
        string_format("samplers that will be used for generation, in order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
        [](common_params & params, const std::string & value) {
            const auto sampler_names = string_split<std::string>(value, ';');
            params.sampling.samplers = common_sampler_types_from_names(sampler_names, true);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-s", "--seed"}, "SEED",
        string_format("RNG seed (default: %d, use random seed for %d)", params.sampling.seed, LLAMA_DEFAULT_SEED),
        [](common_params & params, const std::string & value) {
            params.sampling.seed = std::stoul(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--sampling-seq", "--sampler-seq"}, "SEQUENCE",
        string_format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()),
        [](common_params & params, const std::string & value) {
            params.sampling.samplers = common_sampler_types_from_chars(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--ignore-eos"},
        "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)",
        [](common_params & params) {
            params.sampling.ignore_eos = true;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--temp"}, "N",
        string_format("temperature (default: %.1f)", (double)params.sampling.temp),
        [](common_params & params, const std::string & value) {
            params.sampling.temp = std::stof(value);
            params.sampling.temp = std::max(params.sampling.temp, 0.0f);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-k"}, "N",
        string_format("top-k sampling (default: %d, 0 = disabled)", params.sampling.top_k),
        [](common_params & params, int value) {
            params.sampling.top_k = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-p"}, "N",
        string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sampling.top_p),
        [](common_params & params, const std::string & value) {
            params.sampling.top_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--min-p"}, "N",
        string_format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sampling.min_p),
        [](common_params & params, const std::string & value) {
            params.sampling.min_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-nsigma"}, "N",
        string_format("top-n-sigma sampling (default: %.1f, -1.0 = disabled)", params.sampling.top_n_sigma),
        [](common_params & params, const std::string & value) {
            params.sampling.top_n_sigma = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}).set_sparam());
    add_opt(common_arg(
        {"--xtc-probability"}, "N",
        string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sampling.xtc_probability),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_probability = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--xtc-threshold"}, "N",
        string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sampling.xtc_threshold),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_threshold = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--typical"}, "N",
        string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sampling.typ_p),
        [](common_params & params, const std::string & value) {
            params.sampling.typ_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-last-n"}, "N",
        string_format("last n tokens to consider for penalization (default: %d, 0 = disabled, -1 = ctx_size)", params.sampling.penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid repeat-last-n = %d\n", value));
            }
            params.sampling.penalty_last_n = value;
            params.sampling.n_prev = std::max(params.sampling.n_prev, params.sampling.penalty_last_n);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-penalty"}, "N",
        string_format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sampling.penalty_repeat),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_repeat = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--presence-penalty"}, "N",
        string_format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_present),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_present = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--frequency-penalty"}, "N",
        string_format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_freq),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_freq = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-multiplier"}, "N",
        string_format("set DRY sampling multiplier (default: %.1f, 0.0 = disabled)", (double)params.sampling.dry_multiplier),
        [](common_params & params, const std::string & value) {
            params.sampling.dry_multiplier = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-base"}, "N",
        string_format("set DRY sampling base value (default: %.2f)", (double)params.sampling.dry_base),
        [](common_params & params, const std::string & value) {
            float potential_base = std::stof(value);
            if (potential_base >= 1.0f) {
                params.sampling.dry_base = potential_base;
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-allowed-length"}, "N",
        string_format("set allowed length for DRY sampling (default: %d)", params.sampling.dry_allowed_length),
        [](common_params & params, int value) {
            params.sampling.dry_allowed_length = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-penalty-last-n"}, "N",
        string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sampling.dry_penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid dry-penalty-last-n = %d\n", value));
            }
            params.sampling.dry_penalty_last_n = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-sequence-breaker"}, "STRING",
        string_format("add sequence breaker for DRY sampling, clearing out default breakers (%s) in the process; use \"none\" to not use any sequence breakers\n",
            params.sampling.dry_sequence_breakers.empty() ? "none" :
            std::accumulate(std::next(params.sampling.dry_sequence_breakers.begin()),
                params.sampling.dry_sequence_breakers.end(),
                std::string("'") + (params.sampling.dry_sequence_breakers[0] == "\n" ? "\\n" : params.sampling.dry_sequence_breakers[0]) + "'",
                [](const std::string & a, const std::string & b) {
                    std::string formatted_b = (b == "\n") ? "\\n" : b;
                    return a + ", '" + formatted_b + "'";
                }).c_str()),
        [](common_params & params, const std::string & value) {
            static bool defaults_cleared = false;
            if (!defaults_cleared) {
                params.sampling.dry_sequence_breakers.clear();
                defaults_cleared = true;
            }
            if (value == "none") {
                params.sampling.dry_sequence_breakers.clear();
            } else {
                params.sampling.dry_sequence_breakers.emplace_back(value);
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dynatemp-range"}, "N",
        string_format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sampling.dynatemp_range),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_range = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dynatemp-exp"}, "N",
        string_format("dynamic temperature exponent (default: %.1f)", (double)params.sampling.dynatemp_exponent),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_exponent = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat"}, "N",
        string_format("use Mirostat sampling.\nTop K, Nucleus and Locally Typical samplers are ignored if used.\n"
            "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sampling.mirostat),
        [](common_params & params, int value) {
            params.sampling.mirostat = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-lr"}, "N",
        string_format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sampling.mirostat_eta),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_eta = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-ent"}, "N",
        string_format("Mirostat target entropy, parameter tau (default: %.1f)", (double)params.sampling.mirostat_tau),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_tau = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-l", "--logit-bias"}, "TOKEN_ID(+/-)BIAS",
        "modifies the likelihood of token appearing in the completion,\n"
        "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
        "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'",
        [](common_params & params, const std::string & value) {
            std::stringstream ss(value);
            llama_token key;
            char sign;
            std::string value_str;
            try {
                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
                    const float bias = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
                    params.sampling.logit_bias.push_back({key, bias});
                } else {
                    throw std::invalid_argument("invalid input format");
                }
            } catch (const std::exception &) {
                throw std::invalid_argument("invalid input format");
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--grammar"}, "GRAMMAR",
        string_format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sampling.grammar.c_str()),
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--grammar-file"}, "FNAME",
        "file to read grammar from",
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = read_file(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-j", "--json-schema"}, "SCHEMA",
        "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + examples/json_schema_to_grammar.py instead",
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = json_schema_to_grammar(json::parse(value));
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-jf", "--json-schema-file"}, "FILE",
        "File containing a JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + examples/json_schema_to_grammar.py instead",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string schema;
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(schema)
            );
            params.sampling.grammar = json_schema_to_grammar(json::parse(schema));
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--pooling"}, "{none,mean,cls,last,rank}",
        "pooling type for embeddings, use model default if unspecified",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
            else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
            else if (value == "cls")  { params.pooling_type = LLAMA_POOLING_TYPE_CLS;  }
            else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; }
            else if (value == "rank") { params.pooling_type = LLAMA_POOLING_TYPE_RANK; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_POOLING"));
    add_opt(common_arg(
        {"--attention"}, "{causal,non-causal}",
        "attention type for embeddings, use model default if unspecified",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "causal") { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
            else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
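    // RoPE frequency scaling and YaRN context-extension parameters;
    // --rope-scale N is equivalent to --rope-freq-scale 1/N.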
    add_opt(common_arg(
        {"--rope-scaling"}, "{none,linear,yarn}",
        "RoPE frequency scaling method, defaults to linear unless specified by the model",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
            else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_ROPE_SCALING_TYPE"));
    add_opt(common_arg(
        {"--rope-scale"}, "N",
        "RoPE context scaling factor, expands context by a factor of N",
        [](common_params & params, const std::string & value) {
            params.rope_freq_scale = 1.0f / std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_SCALE"));
    add_opt(common_arg(
        {"--rope-freq-base"}, "N",
        "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)",
        [](common_params & params, const std::string & value) {
            params.rope_freq_base = std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_FREQ_BASE"));
    add_opt(common_arg(
        {"--rope-freq-scale"}, "N",
        "RoPE frequency scaling factor, expands context by a factor of 1/N",
        [](common_params & params, const std::string & value) {
            params.rope_freq_scale = std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_FREQ_SCALE"));
    add_opt(common_arg(
        {"--yarn-orig-ctx"}, "N",
        string_format("YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx),
        [](common_params & params, int value) {
            params.yarn_orig_ctx = value;
        }
    ).set_env("LLAMA_ARG_YARN_ORIG_CTX"));
    add_opt(common_arg(
        {"--yarn-ext-factor"}, "N",
        string_format("YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_ext_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_EXT_FACTOR"));
    add_opt(common_arg(
        {"--yarn-attn-factor"}, "N",
        string_format("YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_attn_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_ATTN_FACTOR"));
    add_opt(common_arg(
        {"--yarn-beta-slow"}, "N",
        string_format("YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_slow = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_SLOW"));
    add_opt(common_arg(
        {"--yarn-beta-fast"}, "N",
        string_format("YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_fast = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_FAST"));
    add_opt(common_arg(
        {"-gan", "--grp-attn-n"}, "N",
        string_format("group-attention factor (default: %d)", params.grp_attn_n),
        [](common_params & params, int value) {
            params.grp_attn_n = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_N").set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"-gaw", "--grp-attn-w"}, "N",
        string_format("group-attention width (default: %d)", params.grp_attn_w),
        [](common_params & params, int value) {
            params.grp_attn_w = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_W").set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-nkvo", "--no-kv-offload"},
        "disable KV offload",
        [](common_params & params) {
            params.no_kv_offload = true;
        }
    ).set_env("LLAMA_ARG_NO_KV_OFFLOAD"));
    add_opt(common_arg(
        {"-nr", "--no-repack"},
        "disable weight repacking",
        [](common_params & params) {
            params.no_extra_bufts = true;
        }
    ).set_env("LLAMA_ARG_NO_REPACK"));
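    // data types used to store the K and V parts of the KV cache.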
    add_opt(common_arg(
        {"-ctk", "--cache-type-k"}, "TYPE",
        string_format(
            "KV cache data type for K\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_k)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_k = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_K"));
    add_opt(common_arg(
        {"-ctv", "--cache-type-v"}, "TYPE",
        string_format(
            "KV cache data type for V\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_v)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_v = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_V"));
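    // evaluation options for the perplexity tool (HellaSwag, Winogrande,
    // multiple choice, KL-divergence).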
    add_opt(common_arg(
        {"--hellaswag"},
        "compute HellaSwag score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.hellaswag = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--hellaswag-tasks"}, "N",
        string_format("number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks),
        [](common_params & params, int value) {
            params.hellaswag_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande"},
        "compute Winogrande score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.winogrande = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande-tasks"}, "N",
        string_format("number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks),
        [](common_params & params, int value) {
            params.winogrande_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice"},
        "compute multiple choice score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.multiple_choice = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice-tasks"}, "N",
        string_format("number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks),
        [](common_params & params, int value) {
            params.multiple_choice_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--kl-divergence"},
        "computes KL-divergence to logits provided via --kl-divergence-base",
        [](common_params & params) {
            params.kl_divergence = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--save-all-logits", "--kl-divergence-base"}, "FNAME",
        "set logits file",
        [](common_params & params, const std::string & value) {
            params.logits_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-stride"}, "N",
        string_format("stride for perplexity calculation (default: %d)", params.ppl_stride),
        [](common_params & params, int value) {
            params.ppl_stride = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-output-type"}, "<0|1>",
        string_format("output type for perplexity calculation (default: %d)", params.ppl_output_type),
        [](common_params & params, int value) {
            params.ppl_output_type = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"-dt", "--defrag-thold"}, "N",
        string_format("KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold),
        [](common_params & params, const std::string & value) {
            params.defrag_thold = std::stof(value);
        }
    ).set_env("LLAMA_ARG_DEFRAG_THOLD"));
    add_opt(common_arg(
        {"-np", "--parallel"}, "N",
        string_format("number of parallel sequences to decode (default: %d)", params.n_parallel),
        [](common_params & params, int value) {
            params.n_parallel = value;
        }
    ).set_env("LLAMA_ARG_N_PARALLEL"));
    add_opt(common_arg(
        {"-ns", "--sequences"}, "N",
        string_format("number of sequences to decode (default: %d)", params.n_sequences),
        [](common_params & params, int value) {
            params.n_sequences = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-cb", "--cont-batching"},
        string_format("enable continuous batching (a.k.a dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled"),
        [](common_params & params) {
            params.cont_batching = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CONT_BATCHING"));
    add_opt(common_arg(
        {"-nocb", "--no-cont-batching"},
        "disable continuous batching",
        [](common_params & params) {
            params.cont_batching = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
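    // multimodal (mtmd) options; when -hf is used the projector can be
    // fetched automatically, hence --no-mmproj to opt out explicitly.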
    add_opt(common_arg(
        {"--mmproj"}, "FILE",
        "path to a multimodal projector file. see tools/mtmd/README.md\n"
        "note: if -hf is used, this argument can be omitted",
        [](common_params & params, const std::string & value) {
            params.mmproj.path = value;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_MMPROJ"));
    add_opt(common_arg(
        {"--mmproj-url"}, "URL",
        "URL to a multimodal projector file. see tools/mtmd/README.md",
        [](common_params & params, const std::string & value) {
            params.mmproj.url = value;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_MMPROJ_URL"));
    add_opt(common_arg(
        {"--no-mmproj"},
        "explicitly disable multimodal projector, useful when using -hf",
        [](common_params & params) {
            params.no_mmproj = true;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ"));
    add_opt(common_arg(
        {"--no-mmproj-offload"},
        "do not offload multimodal projector to GPU",
        [](common_params & params) {
            params.mmproj_use_gpu = false;
        }
    ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD"));
    add_opt(common_arg(
        {"--image", "--audio"}, "FILE",
        "path to an image or audio file. use with multimodal models, can be repeated if you have multiple files\n",
        [](common_params & params, const std::string & value) {
            params.image.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MTMD}));
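    // --rpc is only registered when the RPC backend is available, as reported
    // by llama_supports_rpc().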
    if (llama_supports_rpc()) {
        add_opt(common_arg(
            {"--rpc"}, "SERVERS",
            "comma separated list of RPC servers",
            [](common_params & params, const std::string & value) {
                add_rpc_devices(value);
                GGML_UNUSED(params);
            }
        ).set_env("LLAMA_ARG_RPC"));
    }
    add_opt(common_arg(
        {"--mlock"},
        "force system to keep model in RAM rather than swapping or compressing",
        [](common_params & params) {
            params.use_mlock = true;
        }
    ).set_env("LLAMA_ARG_MLOCK"));
    add_opt(common_arg(
        {"--no-mmap"},
        "do not memory-map model (slower load but may reduce pageouts if not using mlock)",
        [](common_params & params) {
            params.use_mmap = false;
        }
    ).set_env("LLAMA_ARG_NO_MMAP"));
    add_opt(common_arg(
        {"--numa"}, "TYPE",
        "attempt optimizations that help on some NUMA systems\n"
        "- distribute: spread execution evenly over all nodes\n"
        "- isolate: only spawn threads on CPUs on the node that execution started on\n"
        "- numactl: use the CPU map provided by numactl\n"
        "if run without this previously, it is recommended to drop the system page cache before using this\n"
        "see https://github.com/ggml-org/llama.cpp/issues/1437",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_NUMA"));
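    // device selection and tensor-placement options; --list-devices prints
    // the available devices and exits.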
    add_opt(common_arg(
        {"-dev", "--device"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.devices = parse_device_list(value);
        }
    ).set_env("LLAMA_ARG_DEVICE"));
    add_opt(common_arg(
        {"--list-devices"},
        "print list of available devices and exit",
        [](common_params &) {
            std::vector<ggml_backend_dev_t> rpc_devices;
            std::vector<ggml_backend_dev_t> all_devices;
            for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                auto * dev = ggml_backend_dev_get(i);
                if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
                    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
                    if (ggml_backend_reg_name(reg) == std::string("RPC")) {
                        rpc_devices.push_back(dev);
                    } else {
                        all_devices.push_back(dev);
                    }
                }
            }
            // insert RPC devices in front
            all_devices.insert(all_devices.begin(), rpc_devices.begin(), rpc_devices.end());
            printf("Available devices:\n");
            for (size_t i = 0; i < all_devices.size(); ++i) {
                auto * dev = all_devices[i];
                size_t free, total;
                ggml_backend_dev_memory(dev, &free, &total);
                printf("  %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024);
            }
            exit(0);
        }
    ));
    add_opt(common_arg(
        {"--override-tensor", "-ot"}, "<tensor name pattern>=<buffer type>,...",
        "override tensor buffer type", [](common_params & params, const std::string & value) {
            parse_tensor_buffer_overrides(value, params.tensor_buft_overrides);
        }
    ));
    add_opt(common_arg(
        {"--override-tensor-draft", "-otd"}, "<tensor name pattern>=<buffer type>,...",
        "override tensor buffer type for draft model", [](common_params & params, const std::string & value) {
            parse_tensor_buffer_overrides(value, params.speculative.tensor_buft_overrides);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
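    // MoE convenience flags: these expand to --override-tensor patterns that
    // pin the expert FFN tensors to the CPU buffer type.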
  2232. add_opt(common_arg(
  2233. {"--cpu-moe", "-cmoe"},
  2234. "keep all Mixture of Experts (MoE) weights in the CPU",
  2235. [](common_params & params) {
  2236. params.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
  2237. }
  2238. ).set_env("LLAMA_ARG_CPU_MOE"));
  2239. add_opt(common_arg(
  2240. {"--n-cpu-moe", "-ncmoe"}, "N",
  2241. "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU",
  2242. [](common_params & params, int value) {
  2243. if (value < 0) {
  2244. throw std::invalid_argument("invalid value");
  2245. }
  2246. for (int i = 0; i < value; ++i) {
  2247. // keep strings alive and avoid leaking memory by storing them in a static vector
  2248. static std::list<std::string> buft_overrides;
  2249. buft_overrides.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
  2250. params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), ggml_backend_cpu_buffer_type()});
  2251. }
  2252. }
  2253. ).set_env("LLAMA_ARG_N_CPU_MOE"));
    add_opt(common_arg(
        {"--cpu-moe-draft", "-cmoed"},
        "keep all Mixture of Experts (MoE) weights in the CPU for the draft model",
        [](common_params & params) {
            params.speculative.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CPU_MOE_DRAFT"));
    add_opt(common_arg(
        {"--n-cpu-moe-draft", "-ncmoed"}, "N",
        "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU for the draft model",
        [](common_params & params, int value) {
            if (value < 0) {
                throw std::invalid_argument("invalid value");
            }
            for (int i = 0; i < value; ++i) {
                // same keep-alive trick as --n-cpu-moe: the static list owns the strings
                static std::list<std::string> buft_overrides_draft;
                buft_overrides_draft.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
                params.speculative.tensor_buft_overrides.push_back({buft_overrides_draft.back().c_str(), ggml_backend_cpu_buffer_type()});
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_CPU_MOE_DRAFT"));
    add_opt(common_arg(
        {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
        "number of layers to store in VRAM",
        [](common_params & params, int value) {
            params.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
    add_opt(common_arg(
        {"-sm", "--split-mode"}, "{none,layer,row}",
        "how to split the model across multiple GPUs, one of:\n"
        "- none: use one GPU only\n"
        "- layer (default): split layers and KV across GPUs\n"
        "- row: split rows across GPUs",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            if (arg_next == "none") {
                params.split_mode = LLAMA_SPLIT_MODE_NONE;
            } else if (arg_next == "layer") {
                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
            } else if (arg_next == "row") {
                params.split_mode = LLAMA_SPLIT_MODE_ROW;
            } else {
                throw std::invalid_argument("invalid value");
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the split mode has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_SPLIT_MODE"));
    add_opt(common_arg(
        {"-ts", "--tensor-split"}, "N0,N1,N2,...",
        "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            // split string by , and /
            const std::regex regex{ R"([,/]+)" };
            std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
            std::vector<std::string> split_arg{ it, {} };
            if (split_arg.size() >= llama_max_devices()) {
                throw std::invalid_argument(
                    string_format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)llama_max_devices())
                );
            }
            for (size_t i = 0; i < llama_max_devices(); ++i) {
                if (i < split_arg.size()) {
                    params.tensor_split[i] = std::stof(split_arg[i]);
                } else {
                    params.tensor_split[i] = 0.0f;
                }
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting a tensor split has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_TENSOR_SPLIT"));
    add_opt(common_arg(
        {"-mg", "--main-gpu"}, "INDEX",
        string_format("the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu),
        [](common_params & params, int value) {
            params.main_gpu = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the main GPU has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_MAIN_GPU"));
    add_opt(common_arg(
        {"--check-tensors"},
        string_format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"),
        [](common_params & params) {
            params.check_tensors = true;
        }
    ));
    add_opt(common_arg(
        {"--override-kv"}, "KEY=TYPE:VALUE",
        "advanced option to override model metadata by key. may be specified multiple times.\n"
        "types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false",
        [](common_params & params, const std::string & value) {
            if (!string_parse_kv_override(value.c_str(), params.kv_overrides)) {
                throw std::runtime_error(string_format("error: Invalid type for KV override: %s\n", value.c_str()));
            }
        }
    ));
    add_opt(common_arg(
        {"--no-op-offload"},
        string_format("disable offloading host tensor operations to device (default: %s)", params.no_op_offload ? "true" : "false"),
        [](common_params & params) {
            params.no_op_offload = true;
        }
    ));
    add_opt(common_arg(
        {"--lora"}, "FNAME",
        "path to LoRA adapter (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & value) {
            params.lora_adapters.push_back({ std::string(value), 1.0, nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"--lora-scaled"}, "FNAME", "SCALE",
        "path to LoRA adapter with user-defined scaling (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.lora_adapters.push_back({ fname, std::stof(scale), nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
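    // usage sketch (hypothetical files): --lora style.gguf --lora-scaled tone.gguf 0.5
    // applies style.gguf at the default scale 1.0 and tone.gguf at half strength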
    add_opt(common_arg(
        {"--control-vector"}, "FNAME",
        "add a control vector\nnote: this argument can be repeated to add multiple control vectors",
        [](common_params & params, const std::string & value) {
            params.control_vectors.push_back({ 1.0f, value, });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-scaled"}, "FNAME", "SCALE",
        "add a control vector with user-defined scaling SCALE\n"
        "note: this argument can be repeated to add multiple scaled control vectors",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.control_vectors.push_back({ std::stof(scale), fname });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-layer-range"}, "START", "END",
        "layer range to apply the control vector(s) to, start and end inclusive",
        [](common_params & params, const std::string & start, const std::string & end) {
            params.control_vector_layer_start = std::stoi(start);
            params.control_vector_layer_end   = std::stoi(end);
        }
    ));
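    // usage sketch (hypothetical file): --control-vector happy.gguf --control-vector-layer-range 10 20
    // applies the vector at scale 1.0 to layers 10 through 20, inclusive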
    add_opt(common_arg(
        {"-a", "--alias"}, "STRING",
        "set alias for model name (to be used by REST API)",
        [](common_params & params, const std::string & value) {
            params.model_alias = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ALIAS"));
    add_opt(common_arg(
        {"-m", "--model"}, "FNAME",
        ex == LLAMA_EXAMPLE_EXPORT_LORA
            ? std::string("model path from which to load base model")
            : string_format(
                "model path (default: `models/$filename` with filename from `--hf-file` "
                "or `--model-url` if set, otherwise %s)", DEFAULT_MODEL_PATH
            ),
        [](common_params & params, const std::string & value) {
            params.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL"));
    add_opt(common_arg(
        {"-mu", "--model-url"}, "MODEL_URL",
        "model download url (default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.url = value;
        }
    ).set_env("LLAMA_ARG_MODEL_URL"));
    add_opt(common_arg(
        {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]",
        "Hugging Face model repository; quant is optional, case-insensitive, defaults to Q4_K_M, falling back to the first file in the repo if Q4_K_M doesn't exist.\n"
        "mmproj is also downloaded automatically if available. to disable, add --no-mmproj\n"
        "example: unsloth/phi-4-GGUF:q4_k_m\n"
        "(default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO"));
    add_opt(common_arg(
        {"-hfd", "-hfrd", "--hf-repo-draft"}, "<user>/<model>[:quant]",
        "Same as --hf-repo, but for the draft model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HFD_REPO"));
    add_opt(common_arg(
        {"-hff", "--hf-file"}, "FILE",
        "Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE"));
    add_opt(common_arg(
        {"-hfv", "-hfrv", "--hf-repo-v"}, "<user>/<model>[:quant]",
        "Hugging Face model repository for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO_V"));
    add_opt(common_arg(
        {"-hffv", "--hf-file-v"}, "FILE",
        "Hugging Face model file for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE_V"));
    add_opt(common_arg(
        {"-hft", "--hf-token"}, "TOKEN",
        "Hugging Face access token (default: value from HF_TOKEN environment variable)",
        [](common_params & params, const std::string & value) {
            params.hf_token = value;
        }
    ).set_env("HF_TOKEN"));
    add_opt(common_arg(
        {"--context-file"}, "FNAME",
        "file to load context from (repeat to specify multiple files)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.context_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-size"}, "N",
        string_format("minimum length of embedded text chunks (default: %d)", params.chunk_size),
        [](common_params & params, int value) {
            params.chunk_size = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-separator"}, "STRING",
        string_format("separator between chunks (default: '%s')", params.chunk_separator.c_str()),
        [](common_params & params, const std::string & value) {
            params.chunk_separator = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--junk"}, "N",
        string_format("number of times to repeat the junk text (default: %d)", params.n_junk),
        [](common_params & params, int value) {
            params.n_junk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY, LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"--pos"}, "N",
        string_format("position of the passkey in the junk text (default: %d)", params.i_pos),
        [](common_params & params, int value) {
            params.i_pos = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"-o", "--output", "--output-file"}, "FNAME",
        string_format("output file (default: '%s')", params.out_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.out_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS}));
    add_opt(common_arg(
        {"-ofreq", "--output-frequency"}, "N",
        string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
        [](common_params & params, int value) {
            params.n_out_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--output-format"}, "{gguf,dat}",
        string_format("output format for imatrix file (default: %s)", params.imat_dat > 0 ? "dat" : "gguf"),
        [](common_params & params, const std::string & value) {
            /**/ if (value == "gguf") { params.imat_dat = -1; }
            else if (value == "dat") { params.imat_dat = 1; }
            else { throw std::invalid_argument("invalid output format"); }
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--save-frequency"}, "N",
        string_format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq),
        [](common_params & params, int value) {
            params.n_save_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--process-output"},
        string_format("collect data for the output tensor (default: %s)", params.process_output ? "true" : "false"),
        [](common_params & params) {
            params.process_output = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--no-ppl"},
        string_format("do not compute perplexity (default: %s)", params.compute_ppl ? "true" : "false"),
        [](common_params & params) {
            params.compute_ppl = false;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--chunk", "--from-chunk"}, "N",
        string_format("start processing the input from chunk N (default: %d)", params.i_chunk),
        [](common_params & params, int value) {
            params.i_chunk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--show-statistics"},
        string_format("show imatrix statistics and then exit (default: %s)", params.show_statistics ? "true" : "false"),
        [](common_params & params) {
            params.show_statistics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--parse-special"},
        string_format("parse special tokens (chat, tool, etc) (default: %s)", params.parse_special ? "true" : "false"),
        [](common_params & params) {
            params.parse_special = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"-pps"},
        string_format("whether the prompt is shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false"),
        [](common_params & params) {
            params.is_pp_shared = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH, LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-npp"}, "n0,n1,...",
        "number of prompt tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-ntg"}, "n0,n1,...",
        "number of text generation tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-npl"}, "n0,n1,...",
        "number of parallel prompts",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"--embd-normalize"}, "N",
        string_format("normalization for embeddings (default: %d) (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)", params.embd_normalize),
        [](common_params & params, int value) {
            params.embd_normalize = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
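    // e.g. --embd-normalize 2 rescales each embedding to unit euclidean (L2) norm,
    // i.e. v[i] /= sqrt(sum_j v[j]^2); -1 disables normalization entirely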
    add_opt(common_arg(
        {"--embd-output-format"}, "FORMAT",
        "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix",
        [](common_params & params, const std::string & value) {
            params.embd_out = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--embd-separator"}, "STRING",
        "separator of embeddings (default: \\n), for example \"<#sep#>\"",
        [](common_params & params, const std::string & value) {
            params.embd_sep = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--cls-separator"}, "STRING",
        "separator of classification sequences (default: \\t), for example \"<#seq#>\"",
        [](common_params & params, const std::string & value) {
            params.cls_sep = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--host"}, "HOST",
        string_format("ip address to listen on, or bind to a UNIX socket if the address ends with .sock (default: %s)", params.hostname.c_str()),
        [](common_params & params, const std::string & value) {
            params.hostname = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_HOST"));
    add_opt(common_arg(
        {"--port"}, "PORT",
        string_format("port to listen on (default: %d)", params.port),
        [](common_params & params, int value) {
            params.port = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_PORT"));
    add_opt(common_arg(
        {"--path"}, "PATH",
        string_format("path to serve static files from (default: %s)", params.public_path.c_str()),
        [](common_params & params, const std::string & value) {
            params.public_path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH"));
    add_opt(common_arg(
        {"--api-prefix"}, "PREFIX",
        string_format("prefix path the server serves from, without the trailing slash (default: %s)", params.api_prefix.c_str()),
        [](common_params & params, const std::string & value) {
            params.api_prefix = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_API_PREFIX"));
    add_opt(common_arg(
        {"--no-webui"},
        string_format("Disable the Web UI (default: %s)", params.webui ? "enabled" : "disabled"),
        [](common_params & params) {
            params.webui = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_WEBUI"));
    add_opt(common_arg(
        {"--embedding", "--embeddings"},
        string_format("restrict to only support embedding use case; use only with dedicated embedding models (default: %s)", params.embedding ? "enabled" : "disabled"),
        [](common_params & params) {
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS"));
    add_opt(common_arg(
        {"--reranking", "--rerank"},
        string_format("enable reranking endpoint on server (default: %s)", "disabled"),
        [](common_params & params) {
            params.embedding = true;
            params.pooling_type = LLAMA_POOLING_TYPE_RANK;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING"));
    add_opt(common_arg(
        {"--api-key"}, "KEY",
        "API key to use for authentication (default: none)",
        [](common_params & params, const std::string & value) {
            params.api_keys.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_API_KEY"));
    add_opt(common_arg(
        {"--api-key-file"}, "FNAME",
        "path to file containing API keys (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream key_file(value);
            if (!key_file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (!key.empty()) {
                    params.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
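    // expected key-file layout, as implied by the parsing loop above: one key per
    // line, blank lines skipped (hypothetical keys):
    //   sk-example-key-1
    //   sk-example-key-2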
    add_opt(common_arg(
        {"--ssl-key-file"}, "FNAME",
        "path to a file containing a PEM-encoded SSL private key",
        [](common_params & params, const std::string & value) {
            params.ssl_file_key = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_KEY_FILE"));
    add_opt(common_arg(
        {"--ssl-cert-file"}, "FNAME",
        "path to a file containing a PEM-encoded SSL certificate",
        [](common_params & params, const std::string & value) {
            params.ssl_file_cert = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_CERT_FILE"));
    add_opt(common_arg(
        {"--chat-template-kwargs"}, "STRING",
        string_format("sets additional params for the chat template parser, given as a JSON object"),
        [](common_params & params, const std::string & value) {
            auto parsed = json::parse(value);
            for (const auto & item : parsed.items()) {
                params.default_template_kwargs[item.key()] = item.value().dump();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_CHAT_TEMPLATE_KWARGS"));
    add_opt(common_arg(
        {"-to", "--timeout"}, "N",
        string_format("server read/write timeout in seconds (default: %d)", params.timeout_read),
        [](common_params & params, int value) {
            params.timeout_read = value;
            params.timeout_write = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_TIMEOUT"));
    add_opt(common_arg(
        {"--threads-http"}, "N",
        string_format("number of threads used to process HTTP requests (default: %d)", params.n_threads_http),
        [](common_params & params, int value) {
            params.n_threads_http = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
    add_opt(common_arg(
        {"--cache-reuse"}, "N",
        string_format(
            "min chunk size to attempt reusing from the cache via KV shifting (default: %d)\n"
            "[(card)](https://ggml.ai/f0.png)", params.n_cache_reuse
        ),
        [](common_params & params, int value) {
            params.n_cache_reuse = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE"));
    add_opt(common_arg(
        {"--metrics"},
        string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_metrics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
    add_opt(common_arg(
        {"--slots"},
        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_slots = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--props"},
        string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_props = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS"));
    add_opt(common_arg(
        {"--no-slots"},
        "disables slots monitoring endpoint",
        [](common_params & params) {
            params.endpoint_slots = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--slot-save-path"}, "PATH",
        "path to save slot kv cache (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.slot_save_path = value;
            // if it doesn't end with DIRECTORY_SEPARATOR, add it
            if (!params.slot_save_path.empty() && params.slot_save_path[params.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
                params.slot_save_path += DIRECTORY_SEPARATOR;
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--jinja"},
        "use jinja template for chat (default: disabled)",
        [](common_params & params) {
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA"));
    add_opt(common_arg(
        {"--reasoning-format"}, "FORMAT",
        "controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:\n"
        "- none: leaves thoughts unparsed in `message.content`\n"
        "- deepseek: puts thoughts in `message.reasoning_content` (except in streaming mode, which behaves as `none`)\n"
        "(default: auto)",
        [](common_params & params, const std::string & value) {
            params.reasoning_format = common_reasoning_format_from_name(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK"));
    add_opt(common_arg(
        {"--reasoning-budget"}, "N",
        "controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)",
        [](common_params & params, int value) {
            if (value != 0 && value != -1) { throw std::invalid_argument("invalid value"); }
            params.reasoning_budget = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK_BUDGET"));
    add_opt(common_arg(
        {"--chat-template"}, "JINJA_TEMPLATE",
        string_format(
            "set custom jinja chat template (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            params.chat_template = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
    add_opt(common_arg(
        {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
        string_format(
            "set custom jinja chat template file (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            params.chat_template = read_file(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE_FILE"));
    add_opt(common_arg(
        {"--no-prefill-assistant"},
        string_format(
            "whether to prefill the assistant's response if the last message is an assistant message (default: prefill enabled)\n"
            "when this flag is set, if the last message is an assistant message then it will be treated as a full message and not prefilled\n"
        ),
        [](common_params & params) {
            params.prefill_assistant = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_PREFILL_ASSISTANT"));
    add_opt(common_arg(
        {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
        string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),
        [](common_params & params, const std::string & value) {
            params.slot_prompt_similarity = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
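    // rough sketch of the intent (the exact similarity metric lives in the server
    // implementation): with -sps 0.6, a request reuses a cached slot only if its
    // prompt is at least 60% similar to that slot's previous prompt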
    add_opt(common_arg(
        {"--lora-init-without-apply"},
        string_format("load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? "enabled" : "disabled"),
        [](common_params & params) {
            params.lora_init_without_apply = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--simple-io"},
        "use basic IO for better compatibility in subprocesses and limited consoles",
        [](common_params & params) {
            params.simple_io = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--positive-file"}, "FNAME",
        string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_positive_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--negative-file"}, "FNAME",
        string_format("negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_negative_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-batch"}, "N",
        string_format("batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch),
        [](common_params & params, int value) {
            params.n_pca_batch = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-iter"}, "N",
        string_format("number of iterations used for PCA (default: %d)", params.n_pca_iterations),
        [](common_params & params, int value) {
            params.n_pca_iterations = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--method"}, "{pca, mean}",
        "dimensionality reduction method to be used (default: pca)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "pca") { params.cvector_dimre_method = DIMRE_METHOD_PCA; }
            else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--output-format"}, "{md,jsonl}",
        "output format for batched-bench results (default: md)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "jsonl") { params.batched_bench_output_jsonl = true; }
            else if (value == "md") { params.batched_bench_output_jsonl = false; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"--log-disable"},
        "Disable logging",
        [](common_params &) {
            common_log_pause(common_log_main());
        }
    ));
    add_opt(common_arg(
        {"--log-file"}, "FNAME",
        "Log to file",
        [](common_params &, const std::string & value) {
            common_log_set_file(common_log_main(), value.c_str());
        }
    ));
    add_opt(common_arg(
        {"--log-colors"},
        "Enable colored logging",
        [](common_params &) {
            common_log_set_colors(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_COLORS"));
    add_opt(common_arg(
        {"-v", "--verbose", "--log-verbose"},
        "Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
        [](common_params & params) {
            params.verbosity = INT_MAX;
            common_log_set_verbosity_thold(INT_MAX);
        }
    ));
    add_opt(common_arg(
        {"--offline"},
        "Offline mode: forces use of cache, prevents network access",
        [](common_params & params) {
            params.offline = true;
        }
    ).set_env("LLAMA_OFFLINE"));
    add_opt(common_arg(
        {"-lv", "--verbosity", "--log-verbosity"}, "N",
        "Set the verbosity threshold. Messages with a higher verbosity will be ignored.",
        [](common_params & params, int value) {
            params.verbosity = value;
            common_log_set_verbosity_thold(value);
        }
    ).set_env("LLAMA_LOG_VERBOSITY"));
    add_opt(common_arg(
        {"--log-prefix"},
        "Enable prefix in log messages",
        [](common_params &) {
            common_log_set_prefix(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_PREFIX"));
    add_opt(common_arg(
        {"--log-timestamps"},
        "Enable timestamps in log messages",
        [](common_params &) {
            common_log_set_timestamps(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_TIMESTAMPS"));

    // speculative parameters
    add_opt(common_arg(
        {"-td", "--threads-draft"}, "N",
        "number of threads to use during generation for the draft model (default: same as --threads)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.n_threads = value;
            if (params.speculative.cpuparams.n_threads <= 0) {
                params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-tbd", "--threads-batch-draft"}, "N",
        "number of threads to use during batch and prompt processing (default: same as --threads-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.n_threads = value;
            if (params.speculative.cpuparams_batch.n_threads <= 0) {
                params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-Cd", "--cpu-mask-draft"}, "M",
        "Draft model CPU affinity mask. Complements --cpu-range-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crd", "--cpu-range-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
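    // a mask and a range are two ways to pin the same CPUs (hypothetical values,
    // assuming the same hex-mask format as --cpu-mask):
    //   -Cd 0x0F    // hex affinity mask selecting CPUs 0-3
    //   -Crd 0-3    // inclusive CPU range selecting CPUs 0-3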
    add_opt(common_arg(
        {"--cpu-strict-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: same as --poll)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Cbd", "--cpu-mask-batch-draft"}, "M",
        "Draft model CPU affinity mask for batch processing. Complements --cpu-range-batch-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-batch-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-batch-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: --cpu-strict-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-batch-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-batch-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: --poll-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--draft-max", "--draft", "--draft-n"}, "N",
        string_format("number of tokens to draft for speculative decoding (default: %d)", params.speculative.n_max),
        [](common_params & params, int value) {
            params.speculative.n_max = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MAX"));
    add_opt(common_arg(
        {"--draft-min", "--draft-n-min"}, "N",
        string_format("minimum number of draft tokens to use for speculative decoding (default: %d)", params.speculative.n_min),
        [](common_params & params, int value) {
            params.speculative.n_min = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MIN"));
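    // typical speculative setup (hypothetical model files):
    //   llama-server -m target-q8_0.gguf -md draft-q8_0.gguf --draft-max 16 --draft-min 1
    // the draft model cheaply proposes up to 16 tokens per step and the target
    // model verifies them, keeping the output equivalent to target-only decoding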
    add_opt(common_arg(
        {"--draft-p-split"}, "P",
        string_format("speculative decoding split probability (default: %.1f)", (double)params.speculative.p_split),
        [](common_params & params, const std::string & value) {
            params.speculative.p_split = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}).set_env("LLAMA_ARG_DRAFT_P_SPLIT"));
    add_opt(common_arg(
        {"--draft-p-min"}, "P",
        string_format("minimum speculative decoding probability (greedy) (default: %.1f)", (double)params.speculative.p_min),
        [](common_params & params, const std::string & value) {
            params.speculative.p_min = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_P_MIN"));
    add_opt(common_arg(
        {"-cd", "--ctx-size-draft"}, "N",
        string_format("size of the prompt context for the draft model (default: %d, 0 = loaded from model)", params.speculative.n_ctx),
        [](common_params & params, int value) {
            params.speculative.n_ctx = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CTX_SIZE_DRAFT"));
    add_opt(common_arg(
        {"-devd", "--device-draft"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading the draft model (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.speculative.devices = parse_device_list(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N",
        "number of layers to store in VRAM for the draft model",
        [](common_params & params, int value) {
            params.speculative.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_GPU_LAYERS_DRAFT"));
    add_opt(common_arg(
        {"-md", "--model-draft"}, "FNAME",
        "draft model for speculative decoding (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
    add_opt(common_arg(
        {"--spec-replace"}, "TARGET", "DRAFT",
        "translate occurrences of TARGET into DRAFT when the draft model and main model are not compatible",
        [](common_params & params, const std::string & tgt, const std::string & dft) {
            params.speculative.replacements.push_back({ tgt, dft });
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
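    // usage sketch (hypothetical token strings):
    //   --spec-replace "<|assistant|>" "<|im_start|>assistant"
    // rewrites a target-model control string into the draft model's closest equivalent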
    add_opt(common_arg(
        {"-ctkd", "--cache-type-k-draft"}, "TYPE",
        string_format(
            "KV cache data type for K for the draft model\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.speculative.cache_type_k)
        ),
        [](common_params & params, const std::string & value) {
            params.speculative.cache_type_k = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_K_DRAFT"));
    add_opt(common_arg(
        {"-ctvd", "--cache-type-v-draft"}, "TYPE",
        string_format(
            "KV cache data type for V for the draft model\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.speculative.cache_type_v)
        ),
        [](common_params & params, const std::string & value) {
            params.speculative.cache_type_v = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_V_DRAFT"));
    add_opt(common_arg(
        {"-mv", "--model-vocoder"}, "FNAME",
        "vocoder model for audio generation (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-use-guide-tokens"},
        "Use guide tokens to improve TTS word recall",
        [](common_params & params) {
            params.vocoder.use_guide_tokens = true;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-speaker-file"}, "FNAME",
        "speaker file path for audio generation",
        [](common_params & params, const std::string & value) {
            params.vocoder.speaker_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));

    // model-specific
    add_opt(common_arg(
        {"--tts-oute-default"},
        string_format("use default OuteTTS models (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "OuteAI/OuteTTS-0.2-500M-GGUF";
            params.model.hf_file = "OuteTTS-0.2-500M-Q8_0.gguf";
            params.vocoder.model.hf_repo = "ggml-org/WavTokenizer";
            params.vocoder.model.hf_file = "WavTokenizer-Large-75-F16.gguf";
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));
    add_opt(common_arg(
        {"--embd-bge-small-en-default"},
        string_format("use default bge-small-en-v1.5 model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/bge-small-en-v1.5-Q8_0-GGUF";
            params.model.hf_file = "bge-small-en-v1.5-q8_0.gguf";
            params.pooling_type = LLAMA_POOLING_TYPE_NONE;
            params.embd_normalize = 2;
            params.n_ctx = 512;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--embd-e5-small-en-default"},
        string_format("use default e5-small-v2 model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/e5-small-v2-Q8_0-GGUF";
            params.model.hf_file = "e5-small-v2-q8_0.gguf";
            params.pooling_type = LLAMA_POOLING_TYPE_NONE;
            params.embd_normalize = 2;
            params.n_ctx = 512;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--embd-gte-small-default"},
        string_format("use default gte-small model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gte-small-Q8_0-GGUF";
            params.model.hf_file = "gte-small-q8_0.gguf";
            params.pooling_type = LLAMA_POOLING_TYPE_NONE;
            params.embd_normalize = 2;
            params.n_ctx = 512;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-1.5b-default"},
        string_format("use default Qwen 2.5 Coder 1.5B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-3b-default"},
        string_format("use default Qwen 2.5 Coder 3B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-default"},
        string_format("use default Qwen 2.5 Coder 7B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-spec"},
        string_format("use Qwen 2.5 Coder 7B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.speculative.n_gpu_layers = 99;
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-14b-spec"},
        string_format("use Qwen 2.5 Coder 14B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-14B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.speculative.n_gpu_layers = 99;
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        { "--diffusion-steps" }, "N",
        string_format("number of diffusion steps (default: %d)", params.diffusion.steps),
        [](common_params & params, int value) { params.diffusion.steps = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        { "--diffusion-visual" },
        string_format("enable visual diffusion mode (show progressive generation) (default: %s)",
                      params.diffusion.visual_mode ? "true" : "false"),
        [](common_params & params) { params.diffusion.visual_mode = true; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        { "--diffusion-eps" }, "F",
        string_format("epsilon for timesteps (default: %.6f)", (double) params.diffusion.eps),
        [](common_params & params, const std::string & value) { params.diffusion.eps = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        { "--diffusion-algorithm" }, "N",
        string_format("diffusion algorithm: 0=ORIGIN, 1=ENTROPY_BASED, 2=MARGIN_BASED, 3=RANDOM, 4=LOW_CONFIDENCE (default: %d)",
                      params.diffusion.algorithm),
        [](common_params & params, int value) { params.diffusion.algorithm = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        { "--diffusion-alg-temp" }, "F",
        string_format("dream algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp),
        [](common_params & params, const std::string & value) { params.diffusion.alg_temp = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        { "--diffusion-block-length" }, "N",
        string_format("llada block length for generation (default: %d)", params.diffusion.block_length),
        [](common_params & params, int value) { params.diffusion.block_length = value; }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        { "--diffusion-cfg-scale" }, "F",
        string_format("llada classifier-free guidance scale (default: %.3f)", (double) params.diffusion.cfg_scale),
        [](common_params & params, const std::string & value) { params.diffusion.cfg_scale = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
    add_opt(common_arg(
        { "--diffusion-add-gumbel-noise" }, "F",
        string_format("add Gumbel noise to the logits if temp > 0.0 (default: %s)", params.diffusion.add_gumbel_noise ? "true" : "false"),
        // note: the parsed float is converted to bool, so any non-zero value enables the flag
        [](common_params & params, const std::string & value) { params.diffusion.add_gumbel_noise = std::stof(value); }
    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));

    return ctx_arg;
}