#include "gguf.h" // for reading GGUF splits
#include "arg.h"
#include "common.h"
#include "log.h"
#include "sampling.h"
#include "chat.h"

// fix problem with std::min and std::max
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
# define NOMINMAX
#endif
#include <windows.h>
#endif

#include <algorithm>
#include <chrono>        // std::chrono::milliseconds (used by the retry backoff below)
#include <climits>
#include <cmath>         // std::pow (used by the retry backoff below)
#include <cstdarg>
#include <cstdio>        // fprintf, printf
#include <cstdlib>       // std::getenv, exit
#include <filesystem>
#include <fstream>
#include <memory>        // std::unique_ptr
#include <regex>
#include <set>
#include <sstream>       // std::istringstream, std::ostringstream
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>
//#define LLAMA_USE_CURL

#if defined(LLAMA_USE_CURL)
#include <curl/curl.h>
#include <curl/easy.h>
#include <future>
#endif

#include "json-schema-to-grammar.h"

using json = nlohmann::ordered_json;

std::initializer_list<enum llama_example> mmproj_examples = {
    LLAMA_EXAMPLE_LLAVA,
    // TODO: add LLAMA_EXAMPLE_SERVER when it's ready
};

common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) {
    this->examples = std::move(examples);
    return *this;
}

common_arg & common_arg::set_excludes(std::initializer_list<enum llama_example> excludes) {
    this->excludes = std::move(excludes);
    return *this;
}

common_arg & common_arg::set_env(const char * env) {
    help = help + "\n(env: " + env + ")";
    this->env = env;
    return *this;
}

common_arg & common_arg::set_sparam() {
    is_sparam = true;
    return *this;
}

bool common_arg::in_example(enum llama_example ex) {
    return examples.find(ex) != examples.end();
}

bool common_arg::is_exclude(enum llama_example ex) {
    return excludes.find(ex) != excludes.end();
}

bool common_arg::get_value_from_env(std::string & output) {
    if (env == nullptr) return false;
    char * value = std::getenv(env);
    if (value) {
        output = value;
        return true;
    }
    return false;
}

bool common_arg::has_value_from_env() {
    return env != nullptr && std::getenv(env);
}

static std::vector<std::string> break_str_into_lines(std::string input, size_t max_char_per_line) {
    std::vector<std::string> result;
    std::istringstream iss(input);
    std::string line;
    auto add_line = [&](const std::string & l) {
        if (l.length() <= max_char_per_line) {
            result.push_back(l);
        } else {
            std::istringstream line_stream(l);
            std::string word, current_line;
            while (line_stream >> word) {
                if (current_line.length() + !current_line.empty() + word.length() > max_char_per_line) {
                    if (!current_line.empty()) result.push_back(current_line);
                    current_line = word;
                } else {
                    current_line += (!current_line.empty() ? " " : "") + word;
                }
            }
            if (!current_line.empty()) result.push_back(current_line);
        }
    };
    while (std::getline(iss, line)) {
        add_line(line);
    }
    return result;
}
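// Example: break_str_into_lines("lorem ipsum dolor sit amet", 11) word-wraps each
// input line, yielding {"lorem ipsum", "dolor sit", "amet"}; a single word longer
// than max_char_per_line is emitted unsplit on its own line.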
std::string common_arg::to_string() {
    // params for printing to console
    const static int n_leading_spaces = 40;
    const static int n_char_per_line_help = 70; // TODO: detect this based on current console
    std::string leading_spaces(n_leading_spaces, ' ');

    std::ostringstream ss;
    for (const auto arg : args) {
        if (arg == args.front()) {
            if (args.size() == 1) {
                ss << arg;
            } else {
                // the first arg is usually an abbreviation; pad it so the long forms line up
                auto tmp = std::string(arg) + ", ";
                auto spaces = std::string(std::max(0, 7 - (int)tmp.size()), ' ');
                ss << tmp << spaces;
            }
        } else {
            ss << arg << (arg != args.back() ? ", " : "");
        }
    }
    if (value_hint)   ss << " " << value_hint;
    if (value_hint_2) ss << " " << value_hint_2;
    if (ss.tellp() > n_leading_spaces - 3) {
        // current line is too long, add new line
        ss << "\n" << leading_spaces;
    } else {
        // padding between arg and help, same line
        ss << std::string(leading_spaces.size() - ss.tellp(), ' ');
    }
    const auto help_lines = break_str_into_lines(help, n_char_per_line_help);
    for (const auto & line : help_lines) {
        ss << (&line == &help_lines.front() ? "" : leading_spaces) << line << "\n";
    }
    return ss.str();
}
//
// downloader
//

struct common_hf_file_res {
    std::string repo; // repo name with ":tag" removed
    std::string ggufFile;
    std::string mmprojFile;
};

#ifdef LLAMA_USE_CURL

bool common_has_curl() {
    return true;
}

#ifdef __linux__
#include <linux/limits.h>
#elif defined(_WIN32)
# if !defined(PATH_MAX)
# define PATH_MAX MAX_PATH
# endif
#elif defined(_AIX)
#include <sys/limits.h>
#else
#include <sys/syslimits.h>
#endif
#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083

//
// CURL utils
//

using curl_ptr = std::unique_ptr<CURL, decltype(&curl_easy_cleanup)>;

// cannot use unique_ptr for curl_slist, because we cannot update without destroying the old one
struct curl_slist_ptr {
    struct curl_slist * ptr = nullptr;
    ~curl_slist_ptr() {
        if (ptr) {
            curl_slist_free_all(ptr);
        }
    }
};

#define CURL_MAX_RETRY 3
#define CURL_RETRY_DELAY_SECONDS 2
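// With the defaults above (3 attempts, 2 s base delay), the backoff below sleeps
// retry_delay_seconds^(attempt-1) seconds after each failure, i.e. 1 s, 2 s, 4 s;
// note that, as written, it also sleeps once more after the final failed attempt.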
static bool curl_perform_with_retry(const std::string & url, CURL * curl, int max_attempts, int retry_delay_seconds) {
    int remaining_attempts = max_attempts;

    while (remaining_attempts > 0) {
        LOG_INF("%s: Trying to download from %s (attempt %d of %d)...\n", __func__ , url.c_str(), max_attempts - remaining_attempts + 1, max_attempts);

        CURLcode res = curl_easy_perform(curl);
        if (res == CURLE_OK) {
            return true;
        }

        int exponential_backoff_delay = std::pow(retry_delay_seconds, max_attempts - remaining_attempts) * 1000;
        LOG_WRN("%s: curl_easy_perform() failed: %s, retrying after %d milliseconds...\n", __func__, curl_easy_strerror(res), exponential_backoff_delay);

        remaining_attempts--;
        std::this_thread::sleep_for(std::chrono::milliseconds(exponential_backoff_delay));
    }

    LOG_ERR("%s: curl_easy_perform() failed after %d attempts\n", __func__, max_attempts);

    return false;
}

// download one single file from remote URL to local path
static bool common_download_file_single(const std::string & url, const std::string & path, const std::string & bearer_token) {
    // Initialize libcurl
    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
    curl_slist_ptr http_headers;
    if (!curl) {
        LOG_ERR("%s: error initializing libcurl\n", __func__);
        return false;
    }

    bool force_download = false;
    // Set the URL and allow following HTTP redirects
    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);

    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
    // Check if hf-token or bearer-token was specified
    if (!bearer_token.empty()) {
        std::string auth_header = "Authorization: Bearer " + bearer_token;
        http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
    }
    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);

#if defined(_WIN32)
    // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
    // operating system. Currently implemented under MS-Windows.
    curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif

    // Check if the file already exists locally
    auto file_exists = std::filesystem::exists(path);

    // If the file exists, check its JSON metadata companion file.
    std::string metadata_path = path + ".json";
    nlohmann::json metadata;
    std::string etag;
    std::string last_modified;

    if (file_exists) {
        // Try and read the JSON metadata file (note: stream autoclosed upon exiting this block).
        std::ifstream metadata_in(metadata_path);
        if (metadata_in.good()) {
            try {
                metadata_in >> metadata;
                LOG_INF("%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(), metadata.dump().c_str());
                if (metadata.contains("url") && metadata.at("url").is_string()) {
                    auto previous_url = metadata.at("url").get<std::string>();
                    if (previous_url != url) {
                        LOG_ERR("%s: Model URL mismatch: %s != %s\n", __func__, url.c_str(), previous_url.c_str());
                        return false;
                    }
                }
                if (metadata.contains("etag") && metadata.at("etag").is_string()) {
                    etag = metadata.at("etag");
                }
                if (metadata.contains("lastModified") && metadata.at("lastModified").is_string()) {
                    last_modified = metadata.at("lastModified");
                }
            } catch (const nlohmann::json::exception & e) {
                LOG_ERR("%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what());
                return false;
            }
        }
    } else {
        LOG_INF("%s: no previous model file found %s\n", __func__, path.c_str());
    }

    // Send a HEAD request to retrieve the etag and last-modified headers
    struct common_load_model_from_url_headers {
        std::string etag;
        std::string last_modified;
    };

    common_load_model_from_url_headers headers;

    {
        typedef size_t(*CURLOPT_HEADERFUNCTION_PTR)(char *, size_t, size_t, void *);
        auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
            common_load_model_from_url_headers * headers = (common_load_model_from_url_headers *) userdata;

            static std::regex header_regex("([^:]+): (.*)\r\n");
            static std::regex etag_regex("ETag", std::regex_constants::icase);
            static std::regex last_modified_regex("Last-Modified", std::regex_constants::icase);

            std::string header(buffer, n_items);
            std::smatch match;
            if (std::regex_match(header, match, header_regex)) {
                const std::string & key = match[1];
                const std::string & value = match[2];
                if (std::regex_match(key, match, etag_regex)) {
                    headers->etag = value;
                } else if (std::regex_match(key, match, last_modified_regex)) {
                    headers->last_modified = value;
                }
            }
            return n_items;
        };

        curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 1L); // will trigger the HEAD verb
        curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); // hide head request progress
        curl_easy_setopt(curl.get(), CURLOPT_HEADERFUNCTION, static_cast<CURLOPT_HEADERFUNCTION_PTR>(header_callback));
        curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &headers);

        bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS);
        if (!was_perform_successful) {
            return false;
        }

        long http_code = 0;
        curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
        if (http_code != 200) {
            // HEAD not supported, we don't know if the file has changed
            // force trigger downloading
            force_download = true;
            LOG_ERR("%s: HEAD invalid http status code received: %ld\n", __func__, http_code);
        }
    }

    bool should_download = !file_exists || force_download;
    if (!should_download) {
        if (!etag.empty() && etag != headers.etag) {
            LOG_WRN("%s: ETag header is different (%s != %s): triggering a new download\n", __func__, etag.c_str(), headers.etag.c_str());
            should_download = true;
        } else if (!last_modified.empty() && last_modified != headers.last_modified) {
            LOG_WRN("%s: Last-Modified header is different (%s != %s): triggering a new download\n", __func__, last_modified.c_str(), headers.last_modified.c_str());
            should_download = true;
        }
    }
    if (should_download) {
        std::string path_temporary = path + ".downloadInProgress";
        if (file_exists) {
            LOG_WRN("%s: deleting previous downloaded file: %s\n", __func__, path.c_str());
            if (remove(path.c_str()) != 0) {
                LOG_ERR("%s: unable to delete file: %s\n", __func__, path.c_str());
                return false;
            }
        }

        // Set the output file
        struct FILE_deleter {
            void operator()(FILE * f) const {
                fclose(f);
            }
        };

        std::unique_ptr<FILE, FILE_deleter> outfile(fopen(path_temporary.c_str(), "wb"));
        if (!outfile) {
            LOG_ERR("%s: error opening local file for writing: %s\n", __func__, path.c_str());
            return false;
        }

        typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * data, size_t size, size_t nmemb, void * fd);
        auto write_callback = [](void * data, size_t size, size_t nmemb, void * fd) -> size_t {
            return fwrite(data, size, nmemb, (FILE *) fd);
        };
        curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 0L);
        curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
        curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, outfile.get());

        // display download progress
        curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 0L);

        // helper function to hide password in URL
        auto llama_download_hide_password_in_url = [](const std::string & url) -> std::string {
            std::size_t protocol_pos = url.find("://");
            if (protocol_pos == std::string::npos) {
                return url;  // Malformed URL
            }

            std::size_t at_pos = url.find('@', protocol_pos + 3);
            if (at_pos == std::string::npos) {
                return url;  // No password in URL
            }

            return url.substr(0, protocol_pos + 3) + "********" + url.substr(at_pos);
        };

        // start the download
        LOG_INF("%s: trying to download model from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
            llama_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str());
        bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS);
        if (!was_perform_successful) {
            return false;
        }

        long http_code = 0;
        curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
        if (http_code < 200 || http_code >= 400) {
            LOG_ERR("%s: invalid http status code received: %ld\n", __func__, http_code);
            return false;
        }

        // Causes file to be closed explicitly here before we rename it.
        outfile.reset();

        // Write the updated JSON metadata file.
        metadata.update({
            {"url", url},
            {"etag", headers.etag},
            {"lastModified", headers.last_modified}
        });
        std::ofstream(metadata_path) << metadata.dump(4);
        LOG_INF("%s: file metadata saved: %s\n", __func__, metadata_path.c_str());

        if (rename(path_temporary.c_str(), path.c_str()) != 0) {
            LOG_ERR("%s: unable to rename file: %s to %s\n", __func__, path_temporary.c_str(), path.c_str());
            return false;
        }
    }

    return true;
}

// download multiple files from remote URLs to local paths
// the input is a vector of pairs <url, path>
static bool common_download_file_multiple(const std::vector<std::pair<std::string, std::string>> & urls, const std::string & bearer_token) {
    // Prepare download in parallel
    std::vector<std::future<bool>> futures_download;
    for (auto const & item : urls) {
        futures_download.push_back(std::async(std::launch::async, [bearer_token](const std::pair<std::string, std::string> & it) -> bool {
            return common_download_file_single(it.first, it.second, bearer_token);
        }, item));
    }

    // Wait for all downloads to complete
    for (auto & f : futures_download) {
        if (!f.get()) {
            return false;
        }
    }

    return true;
}

static bool common_download_model(
        const common_params_model & model,
        const std::string & bearer_token) {
    // Basic validation of the model.url
    if (model.url.empty()) {
        LOG_ERR("%s: invalid model url\n", __func__);
        return false;
    }

    if (!common_download_file_single(model.url, model.path, bearer_token)) {
        return false;
    }

    // check for additional GGUFs split to download
    int n_split = 0;
    {
        struct gguf_init_params gguf_params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ NULL,
        };
        auto * ctx_gguf = gguf_init_from_file(model.path.c_str(), gguf_params);
        if (!ctx_gguf) {
            LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, model.path.c_str());
            return false;
        }

        auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_SPLIT_COUNT);
        if (key_n_split >= 0) {
            n_split = gguf_get_val_u16(ctx_gguf, key_n_split);
        }

        gguf_free(ctx_gguf);
    }

    if (n_split > 1) {
        char split_prefix[PATH_MAX] = {0};
        char split_url_prefix[LLAMA_CURL_MAX_URL_LENGTH] = {0};

        // Verify the first split file format
        // and extract split URL and PATH prefixes
        {
            if (!llama_split_prefix(split_prefix, sizeof(split_prefix), model.path.c_str(), 0, n_split)) {
                LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, model.path.c_str(), n_split);
                return false;
            }

            if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model.url.c_str(), 0, n_split)) {
                LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model.url.c_str(), n_split);
                return false;
            }
        }

        std::vector<std::pair<std::string, std::string>> urls;
        for (int idx = 1; idx < n_split; idx++) {
            char split_path[PATH_MAX] = {0};
            llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);

            char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
            llama_split_path(split_url, sizeof(split_url), split_url_prefix, idx, n_split);

            if (std::string(split_path) == model.path) {
                continue; // skip the already downloaded file
            }

            urls.push_back({split_url, split_path});
        }

        // Download in parallel
        common_download_file_multiple(urls, bearer_token);
    }

    return true;
}

std::pair<long, std::vector<char>> common_remote_get_content(const std::string & url, const common_remote_params & params) {
    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
    curl_slist_ptr http_headers;
    std::vector<char> res_buffer;

    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L);
    curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);
    typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data);
    auto write_callback = [](void * ptr, size_t size, size_t nmemb, void * data) -> size_t {
        auto data_vec = static_cast<std::vector<char> *>(data);
        data_vec->insert(data_vec->end(), (char *) ptr, (char *) ptr + size * nmemb);
        return size * nmemb;
    };
    curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
    curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &res_buffer);
#if defined(_WIN32)
    curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif
    if (params.timeout > 0) {
        curl_easy_setopt(curl.get(), CURLOPT_TIMEOUT, params.timeout);
    }
    if (params.max_size > 0) {
        curl_easy_setopt(curl.get(), CURLOPT_MAXFILESIZE, params.max_size);
    }
    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
    for (const auto & header : params.headers) {
        http_headers.ptr = curl_slist_append(http_headers.ptr, header.c_str());
    }
    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);

    CURLcode res = curl_easy_perform(curl.get());

    if (res != CURLE_OK) {
        std::string error_msg = curl_easy_strerror(res);
        throw std::runtime_error("error: cannot make GET request: " + error_msg);
    }

    long res_code;
    curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &res_code);

    return { res_code, std::move(res_buffer) };
}
/**
 * Allow getting the HF file from the HF repo with tag (like ollama), for example:
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:q4
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:q5_k_s
 * The tag is optional and defaults to "latest" (meaning it checks for Q4_K_M first, then Q4, and if neither is found, returns the first GGUF file in the repo).
 *
 * Returns a pair of <repo, file> (with the ":tag" already removed from "repo").
 *
 * Note: we use the Ollama-compatible HF API, but we do not use the blobId. Instead, we use the special "ggufFile" field, which returns the value for "hf_file". This is done to stay backward-compatible with existing cache files.
 */
static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_with_tag, const std::string & bearer_token) {
    auto parts = string_split<std::string>(hf_repo_with_tag, ':');
    std::string tag = parts.size() > 1 ? parts.back() : "latest";
    std::string hf_repo = parts[0];
    if (string_split<std::string>(hf_repo, '/').size() != 2) {
        throw std::invalid_argument("error: invalid HF repo format, expected <user>/<model>[:quant]\n");
    }

    std::string url = get_model_endpoint() + "v2/" + hf_repo + "/manifests/" + tag;

    // headers
    std::vector<std::string> headers;
    headers.push_back("Accept: application/json");
    if (!bearer_token.empty()) {
        headers.push_back("Authorization: Bearer " + bearer_token);
    }
    // Important: the User-Agent must be "llama-cpp" to get the "ggufFile" field in the response
    // User-Agent header is already set in common_remote_get_content, no need to set it here

    // make the request
    common_remote_params params;
    params.headers = headers;
    auto res = common_remote_get_content(url, params);
    long res_code = res.first;
    std::string res_str(res.second.data(), res.second.size());
    std::string ggufFile;
    std::string mmprojFile;

    if (res_code == 200) {
        // extract ggufFile.rfilename in json, using regex
        {
            std::regex pattern("\"ggufFile\"[\\s\\S]*?\"rfilename\"\\s*:\\s*\"([^\"]+)\"");
            std::smatch match;
            if (std::regex_search(res_str, match, pattern)) {
                ggufFile = match[1].str();
            }
        }
        // extract mmprojFile.rfilename in json, using regex
        {
            std::regex pattern("\"mmprojFile\"[\\s\\S]*?\"rfilename\"\\s*:\\s*\"([^\"]+)\"");
            std::smatch match;
            if (std::regex_search(res_str, match, pattern)) {
                mmprojFile = match[1].str();
            }
        }
    } else if (res_code == 401) {
        throw std::runtime_error("error: model is private or does not exist; if you are accessing a gated model, please provide a valid HF token");
    } else {
        throw std::runtime_error(string_format("error from HF API, response code: %ld, data: %s", res_code, res_str.c_str()));
    }

    // check response
    if (ggufFile.empty()) {
        throw std::runtime_error("error: model does not have ggufFile");
    }

    return { hf_repo, ggufFile, mmprojFile };
}

#else

bool common_has_curl() {
    return false;
}

static bool common_download_file_single(const std::string &, const std::string &, const std::string &) {
    LOG_ERR("error: built without CURL, cannot download model from internet\n");
    return false;
}

static bool common_download_file_multiple(const std::vector<std::pair<std::string, std::string>> &, const std::string &) {
    LOG_ERR("error: built without CURL, cannot download model from the internet\n");
    return false;
}

static bool common_download_model(
        const common_params_model &,
        const std::string &) {
    LOG_ERR("error: built without CURL, cannot download model from the internet\n");
    return false;
}

static struct common_hf_file_res common_get_hf_file(const std::string &, const std::string &) {
    LOG_ERR("error: built without CURL, cannot download model from the internet\n");
    return {};
}

std::pair<long, std::vector<char>> common_remote_get_content(const std::string & url, const common_remote_params & params) {
    throw std::runtime_error("error: built without CURL, cannot download model from the internet");
}

#endif // LLAMA_USE_CURL

//
// utils
//

struct handle_model_result {
    bool found_mmproj = false;
    common_params_model mmproj;
};

static handle_model_result common_params_handle_model(
        struct common_params_model & model,
        const std::string & bearer_token,
        const std::string & model_path_default) {
    handle_model_result result;
    // handle pre-fill default model path and url based on hf_repo and hf_file
    {
        if (!model.hf_repo.empty()) {
            // short-hand to avoid specifying --hf-file -> default it to --model
            if (model.hf_file.empty()) {
                if (model.path.empty()) {
                    auto auto_detected = common_get_hf_file(model.hf_repo, bearer_token);
                    if (auto_detected.repo.empty() || auto_detected.ggufFile.empty()) {
                        exit(1); // built without CURL, error message already printed
                    }
                    model.hf_repo = auto_detected.repo;
                    model.hf_file = auto_detected.ggufFile;
                    if (!auto_detected.mmprojFile.empty()) {
                        result.found_mmproj   = true;
                        result.mmproj.hf_repo = model.hf_repo;
                        result.mmproj.hf_file = auto_detected.mmprojFile;
                    }
                } else {
                    model.hf_file = model.path;
                }
            }

            std::string model_endpoint = get_model_endpoint();
            model.url = model_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
            // make sure model path is present (for caching purposes)
            if (model.path.empty()) {
                // this is to avoid different repos having the same file name, or the same file name in different subdirs
                std::string filename = model.hf_repo + "_" + model.hf_file;
                // to make sure we don't have any slashes in the filename
                string_replace_all(filename, "/", "_");
                model.path = fs_get_cache_file(filename);
            }
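            // e.g. -hf bartowski/Llama-3.2-3B-Instruct-GGUF with a (hypothetical) file
            // Llama-3.2-3B-Instruct-Q4_K_M.gguf is cached as
            // "bartowski_Llama-3.2-3B-Instruct-GGUF_Llama-3.2-3B-Instruct-Q4_K_M.gguf"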
        } else if (!model.url.empty()) {
            if (model.path.empty()) {
                auto f = string_split<std::string>(model.url, '#').front();
                f = string_split<std::string>(f, '?').front();
                model.path = fs_get_cache_file(string_split<std::string>(f, '/').back());
            }
        } else if (model.path.empty()) {
            model.path = model_path_default;
        }
    }

    // then, download it if needed
    if (!model.url.empty()) {
        bool ok = common_download_model(model, bearer_token);
        if (!ok) {
            LOG_ERR("error: failed to download model from %s\n", model.url.c_str());
            exit(1);
        }
    }

    return result;
}

const std::vector<ggml_type> kv_cache_types = {
    GGML_TYPE_F32,
    GGML_TYPE_F16,
    GGML_TYPE_BF16,
    GGML_TYPE_Q8_0,
    GGML_TYPE_Q4_0,
    GGML_TYPE_Q4_1,
    GGML_TYPE_IQ4_NL,
    GGML_TYPE_Q5_0,
    GGML_TYPE_Q5_1,
};

static ggml_type kv_cache_type_from_str(const std::string & s) {
    for (const auto & type : kv_cache_types) {
        if (ggml_type_name(type) == s) {
            return type;
        }
    }
    throw std::runtime_error("Unsupported cache type: " + s);
}
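// e.g. kv_cache_type_from_str("q8_0") maps to GGML_TYPE_Q8_0, matching the short
// lowercase names reported by ggml_type_name(); unknown names throw.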
static std::string get_all_kv_cache_types() {
    std::ostringstream msg;
    for (const auto & type : kv_cache_types) {
        msg << ggml_type_name(type) << (&type == &kv_cache_types.back() ? "" : ", ");
    }
    return msg.str();
}

//
// CLI argument parsing functions
//

static bool common_params_parse_ex(int argc, char ** argv, common_params_context & ctx_arg) {
    std::string arg;
    const std::string arg_prefix = "--";
    common_params & params = ctx_arg.params;

    std::unordered_map<std::string, common_arg *> arg_to_options;
    for (auto & opt : ctx_arg.options) {
        for (const auto & arg : opt.args) {
            arg_to_options[arg] = &opt;
        }
    }

    // handle environment variables
    for (auto & opt : ctx_arg.options) {
        std::string value;
        if (opt.get_value_from_env(value)) {
            try {
                if (opt.handler_void && (value == "1" || value == "true")) {
                    opt.handler_void(params);
                }
                if (opt.handler_int) {
                    opt.handler_int(params, std::stoi(value));
                }
                if (opt.handler_string) {
                    opt.handler_string(params, value);
                    continue;
                }
            } catch (std::exception & e) {
                throw std::invalid_argument(string_format(
                    "error while handling environment variable \"%s\": %s\n\n", opt.env, e.what()));
            }
        }
    }

    // handle command line arguments
    auto check_arg = [&](int i) {
        if (i+1 >= argc) {
            throw std::invalid_argument("expected value for argument");
        }
    };

    for (int i = 1; i < argc; i++) {
        const std::string arg_prefix = "--";

        std::string arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }
        if (arg_to_options.find(arg) == arg_to_options.end()) {
            throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
        }
        auto opt = *arg_to_options[arg];
        if (opt.has_value_from_env()) {
            fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
        }
        try {
            if (opt.handler_void) {
                opt.handler_void(params);
                continue;
            }

            // arg with single value
            check_arg(i);
            std::string val = argv[++i];
            if (opt.handler_int) {
                opt.handler_int(params, std::stoi(val));
                continue;
            }
            if (opt.handler_string) {
                opt.handler_string(params, val);
                continue;
            }

            // arg with 2 values
            check_arg(i);
            std::string val2 = argv[++i];
            if (opt.handler_str_str) {
                opt.handler_str_str(params, val, val2);
                continue;
            }
        } catch (std::exception & e) {
            throw std::invalid_argument(string_format(
                "error while handling argument \"%s\": %s\n\n"
                "usage:\n%s\n\nto show complete usage, run with -h",
                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
        }
    }

    postprocess_cpu_params(params.cpuparams,             nullptr);
    postprocess_cpu_params(params.cpuparams_batch,       &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams, &params.cpuparams);
    postprocess_cpu_params(params.speculative.cpuparams_batch, &params.cpuparams_batch);

    if (params.prompt_cache_all && (params.interactive || params.interactive_first)) {
        throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
    }

    // handle model and download
    {
        auto res = common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH);
        if (params.no_mmproj) {
            params.mmproj = {};
        } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) {
            // optionally, handle mmproj model when -hf is specified
            params.mmproj = res.mmproj;
        }
        // only download mmproj if the current example is using it
        for (auto & ex : mmproj_examples) {
            if (ctx_arg.ex == ex) {
                common_params_handle_model(params.mmproj, params.hf_token, "");
                break;
            }
        }
        common_params_handle_model(params.speculative.model, params.hf_token, "");
        common_params_handle_model(params.vocoder.model,     params.hf_token, "");
    }

    if (params.escape) {
        string_process_escapes(params.prompt);
        string_process_escapes(params.input_prefix);
        string_process_escapes(params.input_suffix);
        for (auto & antiprompt : params.antiprompt) {
            string_process_escapes(antiprompt);
        }
        for (auto & seq_breaker : params.sampling.dry_sequence_breakers) {
            string_process_escapes(seq_breaker);
        }
    }

    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
    }

    if (!params.tensor_buft_overrides.empty()) {
        params.tensor_buft_overrides.push_back({nullptr, nullptr});
    }

    if (params.reranking && params.embedding) {
        throw std::invalid_argument("error: either --embedding or --reranking can be specified, but not both");
    }

    if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) {
        throw std::runtime_error(string_format(
            "error: the supplied chat template is not supported: %s%s\n",
            params.chat_template.c_str(),
            params.use_jinja ? "" : "\nnote: llama.cpp was started without --jinja, we only support commonly used templates"
        ));
    }

    return true;
}

static void common_params_print_usage(common_params_context & ctx_arg) {
    auto print_options = [](std::vector<common_arg *> & options) {
        for (common_arg * opt : options) {
            printf("%s", opt->to_string().c_str());
        }
    };

    std::vector<common_arg *> common_options;
    std::vector<common_arg *> sparam_options;
    std::vector<common_arg *> specific_options;
    for (auto & opt : ctx_arg.options) {
        // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching the current example
        if (opt.is_sparam) {
            sparam_options.push_back(&opt);
        } else if (opt.in_example(ctx_arg.ex)) {
            specific_options.push_back(&opt);
        } else {
            common_options.push_back(&opt);
        }
    }
    printf("----- common params -----\n\n");
    print_options(common_options);
    printf("\n\n----- sampling params -----\n\n");
    print_options(sparam_options);
    // TODO: maybe convert enum llama_example to string
    printf("\n\n----- example-specific params -----\n\n");
    print_options(specific_options);
}

static void common_params_print_completion(common_params_context & ctx_arg) {
    std::vector<common_arg *> common_options;
    std::vector<common_arg *> sparam_options;
    std::vector<common_arg *> specific_options;

    for (auto & opt : ctx_arg.options) {
        if (opt.is_sparam) {
            sparam_options.push_back(&opt);
        } else if (opt.in_example(ctx_arg.ex)) {
            specific_options.push_back(&opt);
        } else {
            common_options.push_back(&opt);
        }
    }

    printf("_llama_completions() {\n");
    printf("    local cur prev opts\n");
    printf("    COMPREPLY=()\n");
    printf("    cur=\"${COMP_WORDS[COMP_CWORD]}\"\n");
    printf("    prev=\"${COMP_WORDS[COMP_CWORD-1]}\"\n\n");

    printf("    opts=\"");
    auto print_options = [](const std::vector<common_arg *> & options) {
        for (const common_arg * opt : options) {
            for (const char * arg : opt->args) {
                printf("%s ", arg);
            }
        }
    };

    print_options(common_options);
    print_options(sparam_options);
    print_options(specific_options);
    printf("\"\n\n");

    printf("    case \"$prev\" in\n");
    printf("        --model)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        --grammar-file)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.gbnf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        --chat-template-file)\n");
    printf("            COMPREPLY=( $(compgen -f -X '!*.jinja' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("        *)\n");
    printf("            COMPREPLY=( $(compgen -W \"${opts}\" -- \"$cur\") )\n");
    printf("            return 0\n");
    printf("            ;;\n");
    printf("    esac\n");
    printf("}\n\n");

    std::set<std::string> executables = {
        "llama-batched",
        "llama-batched-bench",
        "llama-bench",
        "llama-cli",
        "llama-convert-llama2c-to-ggml",
        "llama-cvector-generator",
        "llama-embedding",
        "llama-eval-callback",
        "llama-export-lora",
        "llama-gen-docs",
        "llama-gguf",
        "llama-gguf-hash",
        "llama-gguf-split",
        "llama-gritlm",
        "llama-imatrix",
        "llama-infill",
        "llama-mtmd-cli",
        "llama-llava-clip-quantize-cli",
        "llama-lookahead",
        "llama-lookup",
        "llama-lookup-create",
        "llama-lookup-merge",
        "llama-lookup-stats",
        "llama-parallel",
        "llama-passkey",
        "llama-perplexity",
        "llama-q8dot",
        "llama-quantize",
        "llama-qwen2vl-cli",
        "llama-retrieval",
        "llama-run",
        "llama-save-load-state",
        "llama-server",
        "llama-simple",
        "llama-simple-chat",
        "llama-speculative",
        "llama-speculative-simple",
        "llama-tokenize",
        "llama-tts",
        "llama-vdot"
    };

    for (const auto & exe : executables) {
        printf("complete -F _llama_completions %s\n", exe.c_str());
    }
}

static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
    std::vector<ggml_backend_dev_t> devices;
    auto dev_names = string_split<std::string>(value, ',');
    if (dev_names.empty()) {
        throw std::invalid_argument("no devices specified");
    }
    if (dev_names.size() == 1 && dev_names[0] == "none") {
        devices.push_back(nullptr);
    } else {
        for (const auto & device : dev_names) {
            auto * dev = ggml_backend_dev_by_name(device.c_str());
            if (!dev || ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_GPU) {
                throw std::invalid_argument(string_format("invalid device: %s", device.c_str()));
            }
            devices.push_back(dev);
        }
        devices.push_back(nullptr);
    }
    return devices;
}
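// e.g. "none" yields { nullptr }; a backend-dependent list such as "CUDA0,CUDA1"
// (names as reported by the loaded backends) yields those GPU devices followed by
// a trailing nullptr terminator.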
static void add_rpc_devices(std::string servers) {
    auto rpc_servers = string_split<std::string>(servers, ',');
    if (rpc_servers.empty()) {
        throw std::invalid_argument("no RPC servers specified");
    }
    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
    if (!rpc_reg) {
        throw std::invalid_argument("failed to find RPC backend");
    }
    typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
    ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
    if (!ggml_backend_rpc_add_device_fn) {
        throw std::invalid_argument("failed to find RPC device add function");
    }
    for (const auto & server : rpc_servers) {
        ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
        if (dev) {
            ggml_backend_device_register(dev);
        } else {
            throw std::invalid_argument("failed to register RPC device");
        }
    }
}
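// e.g. add_rpc_devices("192.0.2.10:50052,192.0.2.11:50052") registers one RPC device
// per endpoint (addresses are illustrative; 50052 is the rpc-server default port).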
bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
    auto ctx_arg = common_params_parser_init(params, ex, print_usage);
    const common_params params_org = ctx_arg.params; // the example can modify the default params

    try {
        if (!common_params_parse_ex(argc, argv, ctx_arg)) {
            ctx_arg.params = params_org;
            return false;
        }
        if (ctx_arg.params.usage) {
            common_params_print_usage(ctx_arg);
            if (ctx_arg.print_usage) {
                ctx_arg.print_usage(argc, argv);
            }
            exit(0);
        }
        if (ctx_arg.params.completion) {
            common_params_print_completion(ctx_arg);
            exit(0);
        }
    } catch (const std::invalid_argument & ex) {
        fprintf(stderr, "%s\n", ex.what());
        ctx_arg.params = params_org;
        return false;
    }

    return true;
}

static std::string list_builtin_chat_templates() {
    std::vector<const char *> supported_tmpl;
    int32_t res = llama_chat_builtin_templates(nullptr, 0);
    supported_tmpl.resize(res);
    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
    std::ostringstream msg;
    for (auto & tmpl : supported_tmpl) {
        msg << tmpl << (&tmpl == &supported_tmpl.back() ? "" : ", ");
    }
    return msg.str();
}

common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
    // load dynamic backends
    ggml_backend_load_all();

    common_params_context ctx_arg(params);
    ctx_arg.print_usage = print_usage;
    ctx_arg.ex          = ex;

    std::string sampler_type_chars;
    std::string sampler_type_names;
    for (const auto & sampler : params.sampling.samplers) {
        sampler_type_chars += common_sampler_type_to_chr(sampler);
        sampler_type_names += common_sampler_type_to_str(sampler) + ";";
    }
    sampler_type_names.pop_back();

    /**
     * filter options by example
     * rules:
     * - all examples inherit options from LLAMA_EXAMPLE_COMMON
     * - if LLAMA_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example
     * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*} are set, we will prioritize the LLAMA_EXAMPLE_* matching the current example
  1026. */
  1027. auto add_opt = [&](common_arg arg) {
  1028. if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
  1029. ctx_arg.options.push_back(std::move(arg));
  1030. }
  1031. };
    add_opt(common_arg(
        {"-h", "--help", "--usage"},
        "print usage and exit",
        [](common_params & params) {
            params.usage = true;
        }
    ));
    add_opt(common_arg(
        {"--version"},
        "show version and build info",
        [](common_params &) {
            fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
            fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
            exit(0);
        }
    ));
    add_opt(common_arg(
        {"--completion-bash"},
        "print source-able bash completion script for llama.cpp",
        [](common_params & params) {
            params.completion = true;
        }
    ));
    add_opt(common_arg(
        {"--verbose-prompt"},
        string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),
        [](common_params & params) {
            params.verbose_prompt = true;
        }
    ));
    add_opt(common_arg(
        {"--no-display-prompt"},
        string_format("don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false"),
        [](common_params & params) {
            params.display_prompt = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-co", "--color"},
        string_format("colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? "true" : "false"),
        [](common_params & params) {
            params.use_color = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-t", "--threads"}, "N",
        string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
        [](common_params & params, int value) {
            params.cpuparams.n_threads = value;
            if (params.cpuparams.n_threads <= 0) {
                params.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_env("LLAMA_ARG_THREADS"));
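    // usage sketch (illustrative): the thread count can come from the flag or the
    // environment variable, e.g. `llama-cli -m model.gguf -t 8` or `LLAMA_ARG_THREADS=8`;
    // any value <= 0 falls back to std::thread::hardware_concurrency() as handled above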
    add_opt(common_arg(
        {"-tb", "--threads-batch"}, "N",
        "number of threads to use during batch and prompt processing (default: same as --threads)",
        [](common_params & params, int value) {
            params.cpuparams_batch.n_threads = value;
            if (params.cpuparams_batch.n_threads <= 0) {
                params.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ));
    add_opt(common_arg(
        {"-C", "--cpu-mask"}, "M",
        "CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: \"\")",
        [](common_params & params, const std::string & mask) {
            params.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ));
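    // example (illustrative, assuming the usual bitmask convention where bit i selects
    // CPU i — the exact semantics live in parse_cpu_mask, not shown here):
    // `--cpu-mask 0xFF` would pin work to CPUs 0-7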
    add_opt(common_arg(
        {"-Cr", "--cpu-range"}, "lo-hi",
        "range of CPUs for affinity. Complements --cpu-mask",
        [](common_params & params, const std::string & range) {
            params.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ));
    add_opt(common_arg(
        {"--cpu-strict"}, "<0|1>",
        string_format("use strict CPU placement (default: %u)\n", (unsigned) params.cpuparams.strict_cpu),
        [](common_params & params, const std::string & value) {
            params.cpuparams.strict_cpu = std::stoul(value);
        }
    ));
    add_opt(common_arg(
        {"--prio"}, "N",
        string_format("set process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ));
    add_opt(common_arg(
        {"--poll"}, "<0...100>",
        string_format("use polling level to wait for work (0 - no polling, default: %u)\n", (unsigned) params.cpuparams.poll),
        [](common_params & params, const std::string & value) {
            params.cpuparams.poll = std::stoul(value);
        }
    ));
    add_opt(common_arg(
        {"-Cb", "--cpu-mask-batch"}, "M",
        "CPU affinity mask: arbitrarily long hex. Complements cpu-range-batch (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ));
    add_opt(common_arg(
        {"-Crb", "--cpu-range-batch"}, "lo-hi",
        "range of CPUs for affinity. Complements --cpu-mask-batch",
        [](common_params & params, const std::string & range) {
            params.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ));
    add_opt(common_arg(
        {"--cpu-strict-batch"}, "<0|1>",
        "use strict CPU placement (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.cpuparams_batch.strict_cpu = value;
        }
    ));
    add_opt(common_arg(
        {"--prio-batch"}, "N",
        string_format("set process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ));
    add_opt(common_arg(
        {"--poll-batch"}, "<0|1>",
        "use polling to wait for work (default: same as --poll)",
        [](common_params & params, int value) {
            params.cpuparams_batch.poll = value;
        }
    ));
    add_opt(common_arg(
        {"-lcs", "--lookup-cache-static"}, "FNAME",
        "path to static lookup cache to use for lookup decoding (not updated by generation)",
        [](common_params & params, const std::string & value) {
            params.lookup_cache_static = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-lcd", "--lookup-cache-dynamic"}, "FNAME",
        "path to dynamic lookup cache to use for lookup decoding (updated by generation)",
        [](common_params & params, const std::string & value) {
            params.lookup_cache_dynamic = value;
        }
    ).set_examples({LLAMA_EXAMPLE_LOOKUP}));
    add_opt(common_arg(
        {"-c", "--ctx-size"}, "N",
        string_format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx),
        [](common_params & params, int value) {
            params.n_ctx = value;
        }
    ).set_env("LLAMA_ARG_CTX_SIZE"));
    add_opt(common_arg(
        {"-n", "--predict", "--n-predict"}, "N",
        string_format(
            ex == LLAMA_EXAMPLE_MAIN || ex == LLAMA_EXAMPLE_INFILL
                ? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
                : "number of tokens to predict (default: %d, -1 = infinity)",
            params.n_predict),
        [](common_params & params, int value) {
            params.n_predict = value;
        }
    ).set_env("LLAMA_ARG_N_PREDICT"));
    add_opt(common_arg(
        {"-b", "--batch-size"}, "N",
        string_format("logical maximum batch size (default: %d)", params.n_batch),
        [](common_params & params, int value) {
            params.n_batch = value;
        }
    ).set_env("LLAMA_ARG_BATCH"));
    add_opt(common_arg(
        {"-ub", "--ubatch-size"}, "N",
        string_format("physical maximum batch size (default: %d)", params.n_ubatch),
        [](common_params & params, int value) {
            params.n_ubatch = value;
        }
    ).set_env("LLAMA_ARG_UBATCH"));
    add_opt(common_arg(
        {"--keep"}, "N",
        string_format("number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep),
        [](common_params & params, int value) {
            params.n_keep = value;
        }
    ));
    add_opt(common_arg(
        {"--no-context-shift"},
        string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
        [](common_params & params) {
            params.ctx_shift = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
    add_opt(common_arg(
        {"--chunks"}, "N",
        string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks),
        [](common_params & params, int value) {
            params.n_chunks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"-fa", "--flash-attn"},
        string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
        [](common_params & params) {
            params.flash_attn = true;
        }
    ).set_env("LLAMA_ARG_FLASH_ATTN"));
    add_opt(common_arg(
        {"-p", "--prompt"}, "PROMPT",
        "prompt to start generation with; for system message, use -sys",
        [](common_params & params, const std::string & value) {
            params.prompt = value;
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-sys", "--system-prompt"}, "PROMPT",
        "system prompt to use with model (if applicable, depending on chat template)",
        [](common_params & params, const std::string & value) {
            params.system_prompt = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--no-perf"},
        string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
        [](common_params & params) {
            params.no_perf = true;
            params.sampling.no_perf = true;
        }
    ).set_env("LLAMA_ARG_NO_PERF"));
    add_opt(common_arg(
        {"-f", "--file"}, "FNAME",
        "a file containing the prompt (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            // store the external file name in params
            params.prompt_file = value;
            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
            if (!params.prompt.empty() && params.prompt.back() == '\n') {
                params.prompt.pop_back();
            }
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-sysf", "--system-prompt-file"}, "FNAME",
        "a file containing the system prompt (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.system_prompt));
            if (!params.system_prompt.empty() && params.system_prompt.back() == '\n') {
                params.system_prompt.pop_back();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-file"}, "FNAME",
        "an input file (repeat to specify multiple files)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.in_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"-bf", "--binary-file"}, "FNAME",
        "binary file containing the prompt (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            // store the external file name in params
            params.prompt_file = value;
            std::ostringstream ss;
            ss << file.rdbuf();
            params.prompt = ss.str();
            fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
        }
    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-e", "--escape"},
        string_format("process escape sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
        [](common_params & params) {
            params.escape = true;
        }
    ));
    add_opt(common_arg(
        {"--no-escape"},
        "do not process escape sequences",
        [](common_params & params) {
            params.escape = false;
        }
    ));
    add_opt(common_arg(
        {"-ptc", "--print-token-count"}, "N",
        string_format("print token count every N tokens (default: %d)", params.n_print),
        [](common_params & params, int value) {
            params.n_print = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache"}, "FNAME",
        "file to cache prompt state for faster startup (default: none)",
        [](common_params & params, const std::string & value) {
            params.path_prompt_cache = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache-all"},
        "if specified, saves user input and generations to cache as well\n",
        [](common_params & params) {
            params.prompt_cache_all = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--prompt-cache-ro"},
        "if specified, uses the prompt cache but does not update it",
        [](common_params & params) {
            params.prompt_cache_ro = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-r", "--reverse-prompt"}, "PROMPT",
        "halt generation at PROMPT, return control in interactive mode\n",
        [](common_params & params, const std::string & value) {
            params.antiprompt.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-sp", "--special"},
        string_format("special tokens output enabled (default: %s)", params.special ? "true" : "false"),
        [](common_params & params) {
            params.special = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-cnv", "--conversation"},
        "run in conversation mode:\n"
        "- does not print special tokens and suffix/prefix\n"
        "- interactive mode is also enabled\n"
        "(default: auto enabled if chat template is available)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-no-cnv", "--no-conversation"},
        "force disable conversation mode (default: false)",
        [](common_params & params) {
            params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-st", "--single-turn"},
        "run conversation for a single turn only, then exit when done\n"
        "will not be interactive if first turn is predefined with --prompt\n"
        "(default: false)",
        [](common_params & params) {
            params.single_turn = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-i", "--interactive"},
        string_format("run in interactive mode (default: %s)", params.interactive ? "true" : "false"),
        [](common_params & params) {
            params.interactive = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-if", "--interactive-first"},
        string_format("run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false"),
        [](common_params & params) {
            params.interactive_first = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-mli", "--multiline-input"},
        "allows you to write or paste multiple lines without ending each in '\\'",
        [](common_params & params) {
            params.multiline_input = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-prefix-bos"},
        "prefix BOS to user inputs, preceding the `--in-prefix` string",
        [](common_params & params) {
            params.input_prefix_bos = true;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"--in-prefix"}, "STRING",
        "string to prefix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_prefix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--in-suffix"}, "STRING",
        "string to suffix user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_suffix = value;
            params.enable_chat_template = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--no-warmup"},
        "skip warming up the model with an empty run",
        [](common_params & params) {
            params.warmup = false;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--spm-infill"},
        string_format(
            "use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this (default: %s)",
            params.spm_infill ? "enabled" : "disabled"
        ),
        [](common_params & params) {
            params.spm_infill = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--samplers"}, "SAMPLERS",
        string_format("samplers that will be used for generation, in order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
        [](common_params & params, const std::string & value) {
            const auto sampler_names = string_split<std::string>(value, ';');
            params.sampling.samplers = common_sampler_types_from_names(sampler_names, true);
        }
    ).set_sparam());
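    // example (illustrative; the accepted names are whatever common_sampler_types_from_names
    // understands): `--samplers "top_k;top_p;min_p;temperature"` replaces the default
    // sampler chain with exactly that order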
    add_opt(common_arg(
        {"-s", "--seed"}, "SEED",
        string_format("RNG seed (default: %d, use random seed for %d)", params.sampling.seed, LLAMA_DEFAULT_SEED),
        [](common_params & params, const std::string & value) {
            params.sampling.seed = std::stoul(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--sampling-seq", "--sampler-seq"}, "SEQUENCE",
        string_format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()),
        [](common_params & params, const std::string & value) {
            params.sampling.samplers = common_sampler_types_from_chars(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--ignore-eos"},
        "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)",
        [](common_params & params) {
            params.sampling.ignore_eos = true;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--temp"}, "N",
        string_format("temperature (default: %.1f)", (double)params.sampling.temp),
        [](common_params & params, const std::string & value) {
            params.sampling.temp = std::stof(value);
            params.sampling.temp = std::max(params.sampling.temp, 0.0f);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-k"}, "N",
        string_format("top-k sampling (default: %d, 0 = disabled)", params.sampling.top_k),
        [](common_params & params, int value) {
            params.sampling.top_k = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-p"}, "N",
        string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sampling.top_p),
        [](common_params & params, const std::string & value) {
            params.sampling.top_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--min-p"}, "N",
        string_format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sampling.min_p),
        [](common_params & params, const std::string & value) {
            params.sampling.min_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--top-nsigma"}, "N",
        string_format("top-n-sigma sampling (default: %.1f, -1.0 = disabled)", (double)params.sampling.top_n_sigma),
        [](common_params & params, const std::string & value) {
            params.sampling.top_n_sigma = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}).set_sparam());
    add_opt(common_arg(
        {"--xtc-probability"}, "N",
        string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sampling.xtc_probability),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_probability = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--xtc-threshold"}, "N",
        string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sampling.xtc_threshold),
        [](common_params & params, const std::string & value) {
            params.sampling.xtc_threshold = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--typical"}, "N",
        string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sampling.typ_p),
        [](common_params & params, const std::string & value) {
            params.sampling.typ_p = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-last-n"}, "N",
        string_format("last n tokens to consider for penalization (default: %d, 0 = disabled, -1 = ctx_size)", params.sampling.penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid repeat-last-n = %d\n", value));
            }
            params.sampling.penalty_last_n = value;
            params.sampling.n_prev = std::max(params.sampling.n_prev, params.sampling.penalty_last_n);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--repeat-penalty"}, "N",
        string_format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sampling.penalty_repeat),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_repeat = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--presence-penalty"}, "N",
        string_format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_present),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_present = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--frequency-penalty"}, "N",
        string_format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_freq),
        [](common_params & params, const std::string & value) {
            params.sampling.penalty_freq = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-multiplier"}, "N",
        string_format("set DRY sampling multiplier (default: %.1f, 0.0 = disabled)", (double)params.sampling.dry_multiplier),
        [](common_params & params, const std::string & value) {
            params.sampling.dry_multiplier = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-base"}, "N",
        string_format("set DRY sampling base value (default: %.2f)", (double)params.sampling.dry_base),
        [](common_params & params, const std::string & value) {
            float potential_base = std::stof(value);
            if (potential_base >= 1.0f) {
                params.sampling.dry_base = potential_base;
            }
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-allowed-length"}, "N",
        string_format("set allowed length for DRY sampling (default: %d)", params.sampling.dry_allowed_length),
        [](common_params & params, int value) {
            params.sampling.dry_allowed_length = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-penalty-last-n"}, "N",
        string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sampling.dry_penalty_last_n),
        [](common_params & params, int value) {
            if (value < -1) {
                throw std::runtime_error(string_format("error: invalid dry-penalty-last-n = %d\n", value));
            }
            params.sampling.dry_penalty_last_n = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dry-sequence-breaker"}, "STRING",
        string_format("add sequence breaker for DRY sampling, clearing out default breakers (%s) in the process; use \"none\" to not use any sequence breakers\n",
            params.sampling.dry_sequence_breakers.empty() ? "none" :
            std::accumulate(std::next(params.sampling.dry_sequence_breakers.begin()),
                params.sampling.dry_sequence_breakers.end(),
                std::string("'") + (params.sampling.dry_sequence_breakers[0] == "\n" ? "\\n" : params.sampling.dry_sequence_breakers[0]) + "'",
                [](const std::string & a, const std::string & b) {
                    std::string formatted_b = (b == "\n") ? "\\n" : b;
                    return a + ", '" + formatted_b + "'";
                }).c_str()),
        [](common_params & params, const std::string & value) {
            static bool defaults_cleared = false;
            if (!defaults_cleared) {
                params.sampling.dry_sequence_breakers.clear();
                defaults_cleared = true;
            }
            if (value == "none") {
                params.sampling.dry_sequence_breakers.clear();
            } else {
                params.sampling.dry_sequence_breakers.emplace_back(value);
            }
        }
    ).set_sparam());
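    // example of the clear-once logic above: `--dry-sequence-breaker "." --dry-sequence-breaker "!"`
    // wipes the default breakers on the first occurrence only, leaving {".", "!"};
    // a later `--dry-sequence-breaker none` empties the list again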
    add_opt(common_arg(
        {"--dynatemp-range"}, "N",
        string_format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sampling.dynatemp_range),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_range = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--dynatemp-exp"}, "N",
        string_format("dynamic temperature exponent (default: %.1f)", (double)params.sampling.dynatemp_exponent),
        [](common_params & params, const std::string & value) {
            params.sampling.dynatemp_exponent = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat"}, "N",
        string_format("use Mirostat sampling.\nTop K, Nucleus and Locally Typical samplers are ignored when Mirostat is enabled.\n"
            "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sampling.mirostat),
        [](common_params & params, int value) {
            params.sampling.mirostat = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-lr"}, "N",
        string_format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sampling.mirostat_eta),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_eta = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--mirostat-ent"}, "N",
        string_format("Mirostat target entropy, parameter tau (default: %.1f)", (double)params.sampling.mirostat_tau),
        [](common_params & params, const std::string & value) {
            params.sampling.mirostat_tau = std::stof(value);
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-l", "--logit-bias"}, "TOKEN_ID(+/-)BIAS",
        "modifies the likelihood of token appearing in the completion,\n"
        "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
        "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'",
        [](common_params & params, const std::string & value) {
            std::stringstream ss(value);
            llama_token key;
            char sign;
            std::string value_str;
            try {
                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
                    const float bias = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
                    params.sampling.logit_bias.push_back({key, bias});
                } else {
                    throw std::invalid_argument("invalid input format");
                }
            } catch (const std::exception &) {
                throw std::invalid_argument("invalid input format");
            }
        }
    ).set_sparam());
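    // parse walkthrough for the handler above: for `--logit-bias 15043+1` the stream reads
    // key = 15043, sign = '+', value_str = "1", yielding bias = +1.0f; `15043-1` flips the
    // sign to -1.0f; anything else (bad sign, non-numeric bias) throws invalid_argument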
    add_opt(common_arg(
        {"--grammar"}, "GRAMMAR",
        string_format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sampling.grammar.c_str()),
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = value;
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--grammar-file"}, "FNAME",
        "file to read grammar from",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(params.sampling.grammar)
            );
        }
    ).set_sparam());
    add_opt(common_arg(
        {"-j", "--json-schema"}, "SCHEMA",
        "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead",
        [](common_params & params, const std::string & value) {
            params.sampling.grammar = json_schema_to_grammar(json::parse(value));
        }
    ).set_sparam());
    add_opt(common_arg(
        {"--pooling"}, "{none,mean,cls,last,rank}",
        "pooling type for embeddings, use model default if unspecified",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
            else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
            else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
            else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; }
            else if (value == "rank") { params.pooling_type = LLAMA_POOLING_TYPE_RANK; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_POOLING"));
    add_opt(common_arg(
        {"--attention"}, "{causal,non-causal}",
        "attention type for embeddings, use model default if unspecified",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "causal") { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
            else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--rope-scaling"}, "{none,linear,yarn}",
        "RoPE frequency scaling method, defaults to linear unless specified by the model",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
            else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_ROPE_SCALING_TYPE"));
    add_opt(common_arg(
        {"--rope-scale"}, "N",
        "RoPE context scaling factor, expands context by a factor of N",
        [](common_params & params, const std::string & value) {
            params.rope_freq_scale = 1.0f / std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_SCALE"));
    add_opt(common_arg(
        {"--rope-freq-base"}, "N",
        "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)",
        [](common_params & params, const std::string & value) {
            params.rope_freq_base = std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_FREQ_BASE"));
    add_opt(common_arg(
        {"--rope-freq-scale"}, "N",
        "RoPE frequency scaling factor, expands context by a factor of 1/N",
        [](common_params & params, const std::string & value) {
            params.rope_freq_scale = std::stof(value);
        }
    ).set_env("LLAMA_ARG_ROPE_FREQ_SCALE"));
    add_opt(common_arg(
        {"--yarn-orig-ctx"}, "N",
        string_format("YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx),
        [](common_params & params, int value) {
            params.yarn_orig_ctx = value;
        }
    ).set_env("LLAMA_ARG_YARN_ORIG_CTX"));
    add_opt(common_arg(
        {"--yarn-ext-factor"}, "N",
        string_format("YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_ext_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_EXT_FACTOR"));
    add_opt(common_arg(
        {"--yarn-attn-factor"}, "N",
        string_format("YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor),
        [](common_params & params, const std::string & value) {
            params.yarn_attn_factor = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_ATTN_FACTOR"));
    add_opt(common_arg(
        {"--yarn-beta-slow"}, "N",
        string_format("YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_slow = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_SLOW"));
    add_opt(common_arg(
        {"--yarn-beta-fast"}, "N",
        string_format("YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast),
        [](common_params & params, const std::string & value) {
            params.yarn_beta_fast = std::stof(value);
        }
    ).set_env("LLAMA_ARG_YARN_BETA_FAST"));
    add_opt(common_arg(
        {"-gan", "--grp-attn-n"}, "N",
        string_format("group-attention factor (default: %d)", params.grp_attn_n),
        [](common_params & params, int value) {
            params.grp_attn_n = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_N").set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"-gaw", "--grp-attn-w"}, "N",
        string_format("group-attention width (default: %d)", params.grp_attn_w),
        [](common_params & params, int value) {
            params.grp_attn_w = value;
        }
    ).set_env("LLAMA_ARG_GRP_ATTN_W").set_examples({LLAMA_EXAMPLE_MAIN}));
    add_opt(common_arg(
        {"-dkvc", "--dump-kv-cache"},
        "verbose print of the KV cache",
        [](common_params & params) {
            params.dump_kv_cache = true;
        }
    ));
    add_opt(common_arg(
        {"-nkvo", "--no-kv-offload"},
        "disable KV offload",
        [](common_params & params) {
            params.no_kv_offload = true;
        }
    ).set_env("LLAMA_ARG_NO_KV_OFFLOAD"));
    add_opt(common_arg(
        {"-ctk", "--cache-type-k"}, "TYPE",
        string_format(
            "KV cache data type for K\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_k)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_k = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_K"));
    add_opt(common_arg(
        {"-ctv", "--cache-type-v"}, "TYPE",
        string_format(
            "KV cache data type for V\n"
            "allowed values: %s\n"
            "(default: %s)",
            get_all_kv_cache_types().c_str(),
            ggml_type_name(params.cache_type_v)
        ),
        [](common_params & params, const std::string & value) {
            params.cache_type_v = kv_cache_type_from_str(value);
        }
    ).set_env("LLAMA_ARG_CACHE_TYPE_V"));
    add_opt(common_arg(
        {"--perplexity", "--all-logits"},
        string_format("return logits for all tokens in the batch (default: %s)", params.logits_all ? "true" : "false"),
        [](common_params & params) {
            params.logits_all = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--hellaswag"},
        "compute HellaSwag score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.hellaswag = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--hellaswag-tasks"}, "N",
        string_format("number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks),
        [](common_params & params, int value) {
            params.hellaswag_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande"},
        "compute Winogrande score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.winogrande = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--winogrande-tasks"}, "N",
        string_format("number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks),
        [](common_params & params, int value) {
            params.winogrande_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice"},
        "compute multiple choice score over random tasks from datafile supplied with -f",
        [](common_params & params) {
            params.multiple_choice = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--multiple-choice-tasks"}, "N",
        string_format("number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks),
        [](common_params & params, int value) {
            params.multiple_choice_tasks = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--kl-divergence"},
        "computes KL-divergence to logits provided via --kl-divergence-base",
        [](common_params & params) {
            params.kl_divergence = true;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--save-all-logits", "--kl-divergence-base"}, "FNAME",
        "set logits file",
        [](common_params & params, const std::string & value) {
            params.logits_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-stride"}, "N",
        string_format("stride for perplexity calculation (default: %d)", params.ppl_stride),
        [](common_params & params, int value) {
            params.ppl_stride = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"--ppl-output-type"}, "<0|1>",
        string_format("output type for perplexity calculation (default: %d)", params.ppl_output_type),
        [](common_params & params, int value) {
            params.ppl_output_type = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
    add_opt(common_arg(
        {"-dt", "--defrag-thold"}, "N",
        string_format("KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold),
        [](common_params & params, const std::string & value) {
            params.defrag_thold = std::stof(value);
        }
    ).set_env("LLAMA_ARG_DEFRAG_THOLD"));
    add_opt(common_arg(
        {"-np", "--parallel"}, "N",
        string_format("number of parallel sequences to decode (default: %d)", params.n_parallel),
        [](common_params & params, int value) {
            params.n_parallel = value;
        }
    ).set_env("LLAMA_ARG_N_PARALLEL"));
    add_opt(common_arg(
        {"-ns", "--sequences"}, "N",
        string_format("number of sequences to decode (default: %d)", params.n_sequences),
        [](common_params & params, int value) {
            params.n_sequences = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PARALLEL}));
    add_opt(common_arg(
        {"-cb", "--cont-batching"},
        string_format("enable continuous batching (a.k.a. dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled"),
        [](common_params & params) {
            params.cont_batching = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CONT_BATCHING"));
    add_opt(common_arg(
        {"-nocb", "--no-cont-batching"},
        "disable continuous batching",
        [](common_params & params) {
            params.cont_batching = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
    add_opt(common_arg(
        {"--mmproj"}, "FILE",
        "path to a multimodal projector file. see examples/llava/README.md",
        [](common_params & params, const std::string & value) {
            params.mmproj.path = value;
        }
    ).set_examples(mmproj_examples));
    add_opt(common_arg(
        {"--mmproj-url"}, "URL",
        "URL to a multimodal projector file. see examples/llava/README.md",
        [](common_params & params, const std::string & value) {
            params.mmproj.url = value;
        }
    ).set_examples(mmproj_examples));
    add_opt(common_arg(
        {"--no-mmproj"},
        "explicitly disable multimodal projector, useful when using -hf",
        [](common_params & params) {
            params.no_mmproj = true;
        }
    ).set_examples(mmproj_examples));
    add_opt(common_arg(
        {"--no-mmproj-offload"},
        "do not offload multimodal projector to GPU",
        [](common_params & params) {
            params.mmproj_use_gpu = false;
        }
    ).set_examples(mmproj_examples));
    add_opt(common_arg(
        {"--image"}, "FILE",
        "path to an image file. use with multimodal models. specify multiple times for batching",
        [](common_params & params, const std::string & value) {
            params.image.emplace_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
    if (llama_supports_rpc()) {
        add_opt(common_arg(
            {"--rpc"}, "SERVERS",
            "comma separated list of RPC servers",
            [](common_params & params, const std::string & value) {
                add_rpc_devices(value);
                GGML_UNUSED(params);
            }
        ).set_env("LLAMA_ARG_RPC"));
    }
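    // example (illustrative — assuming each entry is a host:port pair; the exact format
    // is validated in add_rpc_devices): `--rpc 192.168.1.2:50052,192.168.1.3:50052`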
    add_opt(common_arg(
        {"--mlock"},
        "force system to keep model in RAM rather than swapping or compressing",
        [](common_params & params) {
            params.use_mlock = true;
        }
    ).set_env("LLAMA_ARG_MLOCK"));
    add_opt(common_arg(
        {"--no-mmap"},
        "do not memory-map model (slower load but may reduce pageouts if not using mlock)",
        [](common_params & params) {
            params.use_mmap = false;
        }
    ).set_env("LLAMA_ARG_NO_MMAP"));
    add_opt(common_arg(
        {"--numa"}, "TYPE",
        "attempt optimizations that help on some NUMA systems\n"
        "- distribute: spread execution evenly over all nodes\n"
        "- isolate: only spawn threads on CPUs on the node that execution started on\n"
        "- numactl: use the CPU map provided by numactl\n"
        "if the model was previously run without this option, it is recommended to drop the system page cache before using it\n"
        "see https://github.com/ggml-org/llama.cpp/issues/1437",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_env("LLAMA_ARG_NUMA"));
    add_opt(common_arg(
        {"-dev", "--device"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.devices = parse_device_list(value);
        }
    ).set_env("LLAMA_ARG_DEVICE"));
    add_opt(common_arg(
        {"--list-devices"},
        "print list of available devices and exit",
        [](common_params &) {
            std::vector<ggml_backend_dev_t> rpc_devices;
            std::vector<ggml_backend_dev_t> all_devices;
            for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                auto * dev = ggml_backend_dev_get(i);
                if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
                    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
                    if (ggml_backend_reg_name(reg) == std::string("RPC")) {
                        rpc_devices.push_back(dev);
                    } else {
                        all_devices.push_back(dev);
                    }
                }
            }
            // insert RPC devices in front
            all_devices.insert(all_devices.begin(), rpc_devices.begin(), rpc_devices.end());
            printf("Available devices:\n");
            for (size_t i = 0; i < all_devices.size(); ++i) {
                auto * dev = all_devices[i];
                size_t free, total;
                ggml_backend_dev_memory(dev, &free, &total);
                printf(" %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024);
            }
            exit(0);
        }
    ));
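    // sample output shape of the handler above (device names and sizes illustrative only):
    //
    //     Available devices:
    //      CUDA0: NVIDIA GeForce RTX 3090 (24154 MiB, 23892 MiB free)
    //
    // RPC devices are listed first because of the insert above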
    add_opt(common_arg(
        {"--override-tensor", "-ot"}, "<tensor name pattern>=<buffer type>,...",
        "override tensor buffer type",
        [](common_params & params, const std::string & value) {
            /* static */ std::map<std::string, ggml_backend_buffer_type_t> buft_list;
            if (buft_list.empty()) {
                // enumerate all the devices and add their buffer types to the list
                for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                    auto * dev = ggml_backend_dev_get(i);
                    auto * buft = ggml_backend_dev_buffer_type(dev);
                    if (buft) {
                        buft_list[ggml_backend_buft_name(buft)] = buft;
                    }
                }
            }
            for (const auto & override : string_split<std::string>(value, ',')) {
                std::string::size_type pos = override.find('=');
                if (pos == std::string::npos) {
                    throw std::invalid_argument("invalid value");
                }
                std::string tensor_name = override.substr(0, pos);
                std::string buffer_type = override.substr(pos + 1);
                if (buft_list.find(buffer_type) == buft_list.end()) {
                    printf("Available buffer types:\n");
                    for (const auto & it : buft_list) {
                        printf(" %s\n", ggml_backend_buft_name(it.second));
                    }
                    throw std::invalid_argument("unknown buffer type");
                }
                // FIXME: this leaks memory
                params.tensor_buft_overrides.push_back({strdup(tensor_name.c_str()), buft_list.at(buffer_type)});
            }
        }
    ));
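    // example (illustrative; how the tensor-name pattern is matched is decided by the
    // consumer of tensor_buft_overrides, not here): `-ot "ffn_down=CPU,ffn_up=CPU"`
    // registers two overrides; an unknown buffer type prints the available list and throws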
    add_opt(common_arg(
        {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
        "number of layers to store in VRAM",
        [](common_params & params, int value) {
            params.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_env("LLAMA_ARG_N_GPU_LAYERS"));
    add_opt(common_arg(
        {"-sm", "--split-mode"}, "{none,layer,row}",
        "how to split the model across multiple GPUs, one of:\n"
        "- none: use one GPU only\n"
        "- layer (default): split layers and KV across GPUs\n"
        "- row: split rows across GPUs",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            if (arg_next == "none") {
                params.split_mode = LLAMA_SPLIT_MODE_NONE;
            } else if (arg_next == "layer") {
                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
            } else if (arg_next == "row") {
                params.split_mode = LLAMA_SPLIT_MODE_ROW;
            } else {
                throw std::invalid_argument("invalid value");
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the split mode has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_SPLIT_MODE"));
    add_opt(common_arg(
        {"-ts", "--tensor-split"}, "N0,N1,N2,...",
        "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1",
        [](common_params & params, const std::string & value) {
            std::string arg_next = value;
            // split string by , and /
            const std::regex regex{ R"([,/]+)" };
            std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
            std::vector<std::string> split_arg{ it, {} };
            if (split_arg.size() >= llama_max_devices()) {
                throw std::invalid_argument(
                    string_format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)llama_max_devices())
                );
            }
            for (size_t i = 0; i < llama_max_devices(); ++i) {
                if (i < split_arg.size()) {
                    params.tensor_split[i] = std::stof(split_arg[i]);
                } else {
                    params.tensor_split[i] = 0.0f;
                }
            }
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting a tensor split has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_TENSOR_SPLIT"));
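    // example of the parsing above: `-ts 3,1` (or `-ts 3/1`) stores {3.0, 1.0, 0.0, ...};
    // the values are raw proportions — presumably normalized by the loader — so this
    // would offload roughly 75%/25% across two devices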
    add_opt(common_arg(
        {"-mg", "--main-gpu"}, "INDEX",
        string_format("the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu),
        [](common_params & params, int value) {
            params.main_gpu = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the main GPU has no effect.\n");
            }
        }
    ).set_env("LLAMA_ARG_MAIN_GPU"));
    add_opt(common_arg(
        {"--check-tensors"},
        string_format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"),
        [](common_params & params) {
            params.check_tensors = true;
        }
    ));
    add_opt(common_arg(
        {"--override-kv"}, "KEY=TYPE:VALUE",
        "advanced option to override model metadata by key. may be specified multiple times.\n"
        "types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false",
        [](common_params & params, const std::string & value) {
            if (!string_parse_kv_override(value.c_str(), params.kv_overrides)) {
                throw std::runtime_error(string_format("error: Invalid type for KV override: %s\n", value.c_str()));
            }
        }
    ));
    add_opt(common_arg(
        {"--lora"}, "FNAME",
        "path to LoRA adapter (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & value) {
            params.lora_adapters.push_back({ std::string(value), 1.0, nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"--lora-scaled"}, "FNAME", "SCALE",
        "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.lora_adapters.push_back({ fname, std::stof(scale), nullptr });
        }
        // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
    add_opt(common_arg(
        {"--control-vector"}, "FNAME",
        "add a control vector\nnote: this argument can be repeated to add multiple control vectors",
        [](common_params & params, const std::string & value) {
            params.control_vectors.push_back({ 1.0f, value, });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-scaled"}, "FNAME", "SCALE",
        "add a control vector with user defined scaling SCALE\n"
        "note: this argument can be repeated to add multiple scaled control vectors",
        [](common_params & params, const std::string & fname, const std::string & scale) {
            params.control_vectors.push_back({ std::stof(scale), fname });
        }
    ));
    add_opt(common_arg(
        {"--control-vector-layer-range"}, "START", "END",
        "layer range to apply the control vector(s) to, start and end inclusive",
        [](common_params & params, const std::string & start, const std::string & end) {
            params.control_vector_layer_start = std::stoi(start);
            params.control_vector_layer_end = std::stoi(end);
        }
    ));
    add_opt(common_arg(
        {"-a", "--alias"}, "STRING",
        "set alias for model name (to be used by REST API)",
        [](common_params & params, const std::string & value) {
            params.model_alias = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ALIAS"));
    add_opt(common_arg(
        {"-m", "--model"}, "FNAME",
        ex == LLAMA_EXAMPLE_EXPORT_LORA
            ? std::string("model path from which to load base model")
            : string_format(
                "model path (default: `models/$filename` with filename from `--hf-file` "
                "or `--model-url` if set, otherwise %s)", DEFAULT_MODEL_PATH
            ),
        [](common_params & params, const std::string & value) {
            params.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL"));
    add_opt(common_arg(
        {"-mu", "--model-url"}, "MODEL_URL",
        "model download url (default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.url = value;
        }
    ).set_env("LLAMA_ARG_MODEL_URL"));
    add_opt(common_arg(
        {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]",
        "Hugging Face model repository; quant is optional, case-insensitive, defaults to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.\n"
        "mmproj is also downloaded automatically if available. to disable, add --no-mmproj\n"
        "example: unsloth/phi-4-GGUF:q4_k_m\n"
        "(default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO"));
    add_opt(common_arg(
        {"-hfd", "-hfrd", "--hf-repo-draft"}, "<user>/<model>[:quant]",
        "Same as --hf-repo, but for the draft model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HFD_REPO"));
    add_opt(common_arg(
        {"-hff", "--hf-file"}, "FILE",
        "Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)",
        [](common_params & params, const std::string & value) {
            params.model.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE"));
    add_opt(common_arg(
        {"-hfv", "-hfrv", "--hf-repo-v"}, "<user>/<model>[:quant]",
        "Hugging Face model repository for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.hf_repo = value;
        }
    ).set_env("LLAMA_ARG_HF_REPO_V"));
    add_opt(common_arg(
        {"-hffv", "--hf-file-v"}, "FILE",
        "Hugging Face model file for the vocoder model (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.hf_file = value;
        }
    ).set_env("LLAMA_ARG_HF_FILE_V"));
    add_opt(common_arg(
        {"-hft", "--hf-token"}, "TOKEN",
        "Hugging Face access token (default: value from HF_TOKEN environment variable)",
        [](common_params & params, const std::string & value) {
            params.hf_token = value;
        }
    ).set_env("HF_TOKEN"));
    add_opt(common_arg(
        {"--context-file"}, "FNAME",
        "file to load context from (repeat to specify multiple files)",
        [](common_params & params, const std::string & value) {
            std::ifstream file(value, std::ios::binary);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            params.context_files.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-size"}, "N",
        string_format("minimum length of embedded text chunks (default: %d)", params.chunk_size),
        [](common_params & params, int value) {
            params.chunk_size = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
    add_opt(common_arg(
        {"--chunk-separator"}, "STRING",
        string_format("separator between chunks (default: '%s')", params.chunk_separator.c_str()),
        [](common_params & params, const std::string & value) {
            params.chunk_separator = value;
        }
    ).set_examples({LLAMA_EXAMPLE_RETRIEVAL}));
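    // Usage sketch for the retrieval example; model and document names are hypothetical:
    //   llama-retrieval -m embd-model.gguf \
    //       --context-file doc1.txt --context-file doc2.txt \
    //       --chunk-size 64 --chunk-separator "\n"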
    add_opt(common_arg(
        {"--junk"}, "N",
        string_format("number of times to repeat the junk text (default: %d)", params.n_junk),
        [](common_params & params, int value) {
            params.n_junk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
    add_opt(common_arg(
        {"--pos"}, "N",
        string_format("position of the passkey in the junk text (default: %d)", params.i_pos),
        [](common_params & params, int value) {
            params.i_pos = value;
        }
    ).set_examples({LLAMA_EXAMPLE_PASSKEY}));
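    // Usage sketch for the passkey test; the model name and values are hypothetical:
    //   llama-passkey -m model.gguf --junk 250 --pos 90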
    add_opt(common_arg(
        {"-o", "--output", "--output-file"}, "FNAME",
        string_format("output file (default: '%s')", params.out_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.out_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS}));
    add_opt(common_arg(
        {"-ofreq", "--output-frequency"}, "N",
        string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
        [](common_params & params, int value) {
            params.n_out_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--save-frequency"}, "N",
        string_format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq),
        [](common_params & params, int value) {
            params.n_save_freq = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--process-output"},
        string_format("collect data for the output tensor (default: %s)", params.process_output ? "true" : "false"),
        [](common_params & params) {
            params.process_output = true;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--no-ppl"},
        string_format("do not compute perplexity (default: %s)", params.compute_ppl ? "true" : "false"),
        [](common_params & params) {
            params.compute_ppl = false;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
    add_opt(common_arg(
        {"--chunk", "--from-chunk"}, "N",
        string_format("start processing the input from chunk N (default: %d)", params.i_chunk),
        [](common_params & params, int value) {
            params.i_chunk = value;
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
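    // Usage sketch for imatrix collection; file names are hypothetical, and -f
    // (the prompt-file option) is assumed to be registered elsewhere in this file:
    //   llama-imatrix -m model.gguf -f calibration.txt \
    //       -o imatrix.dat --output-frequency 10 --save-frequency 50 --process-output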
    add_opt(common_arg(
        {"-pps"},
        string_format("whether the prompt is shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false"),
        [](common_params & params) {
            params.is_pp_shared = true;
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-npp"}, "n0,n1,...",
        "number of prompt tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-ntg"}, "n0,n1,...",
        "number of text generation tokens",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"-npl"}, "n0,n1,...",
        "number of parallel prompts",
        [](common_params & params, const std::string & value) {
            auto p = string_split<int>(value, ',');
            params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
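    // Usage sketch for batched-bench; the token counts are arbitrary examples:
    //   llama-batched-bench -m model.gguf -pps -npp 128,256 -ntg 128 -npl 1,2,4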
    add_opt(common_arg(
        {"--embd-normalize"}, "N",
        string_format("normalisation for embeddings (default: %d) (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)", params.embd_normalize),
        [](common_params & params, int value) {
            params.embd_normalize = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--embd-output-format"}, "FORMAT",
        "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix",
        [](common_params & params, const std::string & value) {
            params.embd_out = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
    add_opt(common_arg(
        {"--embd-separator"}, "STRING",
        "separator of embeddings (default: \\n), for example \"<#sep#>\"",
        [](common_params & params, const std::string & value) {
            params.embd_sep = value;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING}));
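    // Usage sketch for the embedding example; the model name is hypothetical and
    // -p (the prompt option) is assumed to be registered elsewhere in this file:
    //   llama-embedding -m embd-model.gguf -p "first<#sep#>second" \
    //       --embd-separator "<#sep#>" --embd-normalize 2 --embd-output-format json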
    add_opt(common_arg(
        {"--host"}, "HOST",
        string_format("IP address to listen on, or bind to a UNIX socket if the address ends with .sock (default: %s)", params.hostname.c_str()),
        [](common_params & params, const std::string & value) {
            params.hostname = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_HOST"));
    add_opt(common_arg(
        {"--port"}, "PORT",
        string_format("port to listen on (default: %d)", params.port),
        [](common_params & params, int value) {
            params.port = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_PORT"));
    add_opt(common_arg(
        {"--path"}, "PATH",
        string_format("path to serve static files from (default: %s)", params.public_path.c_str()),
        [](common_params & params, const std::string & value) {
            params.public_path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH"));
    add_opt(common_arg(
        {"--no-webui"},
        string_format("Disable the Web UI (default: %s)", params.webui ? "enabled" : "disabled"),
        [](common_params & params) {
            params.webui = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_WEBUI"));
    add_opt(common_arg(
        {"--embedding", "--embeddings"},
        string_format("restrict to only support embedding use case; use only with dedicated embedding models (default: %s)", params.embedding ? "enabled" : "disabled"),
        [](common_params & params) {
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS"));
    add_opt(common_arg(
        {"--reranking", "--rerank"},
        string_format("enable reranking endpoint on server (default: %s)", params.reranking ? "enabled" : "disabled"),
        [](common_params & params) {
            params.reranking = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING"));
    add_opt(common_arg(
        {"--api-key"}, "KEY",
        "API key to use for authentication (default: none)",
        [](common_params & params, const std::string & value) {
            params.api_keys.push_back(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_API_KEY"));
    add_opt(common_arg(
        {"--api-key-file"}, "FNAME",
        "path to file containing API keys (default: none)",
        [](common_params & params, const std::string & value) {
            std::ifstream key_file(value);
            if (!key_file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (!key.empty()) {
                    params.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
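    // Usage sketch for a basic server setup; the model path and key file are hypothetical:
    //   llama-server -m model.gguf --host 0.0.0.0 --port 8080 \
    //       --api-key-file keys.txt --metrics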
    add_opt(common_arg(
        {"--ssl-key-file"}, "FNAME",
        "path to file containing a PEM-encoded SSL private key",
        [](common_params & params, const std::string & value) {
            params.ssl_file_key = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_KEY_FILE"));
    add_opt(common_arg(
        {"--ssl-cert-file"}, "FNAME",
        "path to file containing a PEM-encoded SSL certificate",
        [](common_params & params, const std::string & value) {
            params.ssl_file_cert = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_CERT_FILE"));
    add_opt(common_arg(
        {"-to", "--timeout"}, "N",
        string_format("server read/write timeout in seconds (default: %d)", params.timeout_read),
        [](common_params & params, int value) {
            params.timeout_read  = value;
            params.timeout_write = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_TIMEOUT"));
    add_opt(common_arg(
        {"--threads-http"}, "N",
        string_format("number of threads used to process HTTP requests (default: %d)", params.n_threads_http),
        [](common_params & params, int value) {
            params.n_threads_http = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
    add_opt(common_arg(
        {"--cache-reuse"}, "N",
        string_format("min chunk size to attempt reusing from the cache via KV shifting (default: %d)", params.n_cache_reuse),
        [](common_params & params, int value) {
            params.n_cache_reuse = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE"));
    add_opt(common_arg(
        {"--metrics"},
        string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_metrics = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
    add_opt(common_arg(
        {"--slots"},
        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_slots = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--props"},
        string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"),
        [](common_params & params) {
            params.endpoint_props = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS"));
    add_opt(common_arg(
        {"--no-slots"},
        "disables slots monitoring endpoint",
        [](common_params & params) {
            params.endpoint_slots = false;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_ENDPOINT_SLOTS"));
    add_opt(common_arg(
        {"--slot-save-path"}, "PATH",
        "path to save slot kv cache (default: disabled)",
        [](common_params & params, const std::string & value) {
            params.slot_save_path = value;
            // if it doesn't end with DIRECTORY_SEPARATOR, add it
            if (!params.slot_save_path.empty() && params.slot_save_path[params.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
                params.slot_save_path += DIRECTORY_SEPARATOR;
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--jinja"},
        "use jinja template for chat (default: disabled)",
        [](common_params & params) {
            params.use_jinja = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA"));
    add_opt(common_arg(
        {"--reasoning-format"}, "FORMAT",
        "reasoning format (default: deepseek; allowed values: deepseek, none)\n"
        "controls whether thought tags are extracted from the response, and in which format they're returned. 'none' leaves thoughts unparsed in `message.content`, 'deepseek' puts them in `message.reasoning_content` (for DeepSeek R1 & Command R7B only).\n"
        "only supported for non-streamed responses",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "deepseek") { params.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK; }
            else if (value == "none")     { params.reasoning_format = COMMON_REASONING_FORMAT_NONE;     }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK"));
    add_opt(common_arg(
        {"--chat-template"}, "JINJA_TEMPLATE",
        string_format(
            "set custom jinja chat template (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            params.chat_template = value;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_LLAVA}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
    add_opt(common_arg(
        {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
        string_format(
            "set custom jinja chat template file (default: template taken from model's metadata)\n"
            "if suffix/prefix are specified, template will be disabled\n"
            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
        ),
        [](common_params & params, const std::string & value) {
            std::ifstream file(value);
            if (!file) {
                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
            }
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(params.chat_template));
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE_FILE"));
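    // Usage sketch for overriding the chat template; the template file is hypothetical:
    //   llama-server -m model.gguf --jinja --chat-template-file mytemplate.jinja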
    add_opt(common_arg(
        {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
        string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),
        [](common_params & params, const std::string & value) {
            params.slot_prompt_similarity = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--lora-init-without-apply"},
        string_format("load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? "enabled" : "disabled"),
        [](common_params & params) {
            params.lora_init_without_apply = true;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--simple-io"},
        "use basic IO for better compatibility in subprocesses and limited consoles",
        [](common_params & params) {
            params.simple_io = true;
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
    add_opt(common_arg(
        {"--positive-file"}, "FNAME",
        string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_positive_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--negative-file"}, "FNAME",
        string_format("negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str()),
        [](common_params & params, const std::string & value) {
            params.cvector_negative_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-batch"}, "N",
        string_format("batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch),
        [](common_params & params, int value) {
            params.n_pca_batch = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--pca-iter"}, "N",
        string_format("number of iterations used for PCA (default: %d)", params.n_pca_iterations),
        [](common_params & params, int value) {
            params.n_pca_iterations = value;
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
    add_opt(common_arg(
        {"--method"}, "{pca, mean}",
        "dimensionality reduction method to be used (default: pca)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "pca")  { params.cvector_dimre_method = DIMRE_METHOD_PCA;  }
            else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR}));
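    // Usage sketch for control-vector generation; file names are hypothetical:
    //   llama-cvector-generator -m model.gguf \
    //       --positive-file positive.txt --negative-file negative.txt \
    //       --method pca --pca-batch 100 --pca-iter 1000 -o control.gguf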
    add_opt(common_arg(
        {"--output-format"}, "{md,jsonl}",
        "output format for batched-bench results (default: md)",
        [](common_params & params, const std::string & value) {
            /**/ if (value == "jsonl") { params.batched_bench_output_jsonl = true;  }
            else if (value == "md")    { params.batched_bench_output_jsonl = false; }
            else { throw std::invalid_argument("invalid value"); }
        }
    ).set_examples({LLAMA_EXAMPLE_BENCH}));
    add_opt(common_arg(
        {"--log-disable"},
        "Disable logging",
        [](common_params &) {
            common_log_pause(common_log_main());
        }
    ));
    add_opt(common_arg(
        {"--log-file"}, "FNAME",
        "Log to file",
        [](common_params &, const std::string & value) {
            common_log_set_file(common_log_main(), value.c_str());
        }
    ));
    add_opt(common_arg(
        {"--log-colors"},
        "Enable colored logging",
        [](common_params &) {
            common_log_set_colors(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_COLORS"));
    add_opt(common_arg(
        {"-v", "--verbose", "--log-verbose"},
        "Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
        [](common_params & params) {
            params.verbosity = INT_MAX;
            common_log_set_verbosity_thold(INT_MAX);
        }
    ));
    add_opt(common_arg(
        {"-lv", "--verbosity", "--log-verbosity"}, "N",
        "Set the verbosity threshold. Messages with a higher verbosity will be ignored.",
        [](common_params & params, int value) {
            params.verbosity = value;
            common_log_set_verbosity_thold(value);
        }
    ).set_env("LLAMA_LOG_VERBOSITY"));
    add_opt(common_arg(
        {"--log-prefix"},
        "Enable prefix in log messages",
        [](common_params &) {
            common_log_set_prefix(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_PREFIX"));
    add_opt(common_arg(
        {"--log-timestamps"},
        "Enable timestamps in log messages",
        [](common_params &) {
            common_log_set_timestamps(common_log_main(), true);
        }
    ).set_env("LLAMA_LOG_TIMESTAMPS"));
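    // Usage sketch for the logging options; the environment-variable forms below are
    // assumed to mirror the flags via their set_env registrations:
    //   llama-cli -m model.gguf --log-file run.log --log-timestamps --log-prefix -lv 1
    //   LLAMA_LOG_COLORS=1 LLAMA_LOG_TIMESTAMPS=1 llama-cli -m model.gguf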
    // speculative parameters
    add_opt(common_arg(
        {"-td", "--threads-draft"}, "N",
        "number of threads to use during generation (default: same as --threads)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.n_threads = value;
            if (params.speculative.cpuparams.n_threads <= 0) {
                params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-tbd", "--threads-batch-draft"}, "N",
        "number of threads to use during batch and prompt processing (default: same as --threads-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.n_threads = value;
            if (params.speculative.cpuparams_batch.n_threads <= 0) {
                params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Cd", "--cpu-mask-draft"}, "M",
        "Draft model CPU affinity mask. Complements --cpu-range-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crd", "--cpu-range-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams.cpumask)) {
                throw std::invalid_argument("invalid range");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: same as --cpu-strict)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: same as --poll)",
        [](common_params & params, int value) {
            params.speculative.cpuparams.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Cbd", "--cpu-mask-batch-draft"}, "M",
        "Draft model CPU affinity mask for batch processing. Complements --cpu-range-batch-draft (default: same as --cpu-mask)",
        [](common_params & params, const std::string & mask) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_mask(mask, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi",
        "Ranges of CPUs for affinity. Complements --cpu-mask-batch-draft",
        [](common_params & params, const std::string & range) {
            params.speculative.cpuparams_batch.mask_valid = true;
            if (!parse_cpu_range(range, params.speculative.cpuparams_batch.cpumask)) {
                throw std::invalid_argument("invalid cpumask");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--cpu-strict-batch-draft"}, "<0|1>",
        "Use strict CPU placement for draft model (default: --cpu-strict-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.strict_cpu = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--prio-batch-draft"}, "N",
        string_format("set draft process/thread priority: 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams_batch.priority),
        [](common_params & params, int prio) {
            if (prio < 0 || prio > 3) {
                throw std::invalid_argument("invalid value");
            }
            params.speculative.cpuparams_batch.priority = (enum ggml_sched_priority) prio;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--poll-batch-draft"}, "<0|1>",
        "Use polling to wait for draft model work (default: --poll-draft)",
        [](common_params & params, int value) {
            params.speculative.cpuparams_batch.poll = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
    add_opt(common_arg(
        {"--draft-max", "--draft", "--draft-n"}, "N",
        string_format("number of tokens to draft for speculative decoding (default: %d)", params.speculative.n_max),
        [](common_params & params, int value) {
            params.speculative.n_max = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MAX"));
    add_opt(common_arg(
        {"--draft-min", "--draft-n-min"}, "N",
        string_format("minimum number of draft tokens to use for speculative decoding (default: %d)", params.speculative.n_min),
        [](common_params & params, int value) {
            params.speculative.n_min = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MIN"));
    add_opt(common_arg(
        {"--draft-p-split"}, "P",
        string_format("speculative decoding split probability (default: %.1f)", (double)params.speculative.p_split),
        [](common_params & params, const std::string & value) {
            params.speculative.p_split = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}).set_env("LLAMA_ARG_DRAFT_P_SPLIT"));
    add_opt(common_arg(
        {"--draft-p-min"}, "P",
        string_format("minimum speculative decoding probability (greedy) (default: %.1f)", (double)params.speculative.p_min),
        [](common_params & params, const std::string & value) {
            params.speculative.p_min = std::stof(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_P_MIN"));
    add_opt(common_arg(
        {"-cd", "--ctx-size-draft"}, "N",
        string_format("size of the prompt context for the draft model (default: %d, 0 = loaded from model)", params.speculative.n_ctx),
        [](common_params & params, int value) {
            params.speculative.n_ctx = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CTX_SIZE_DRAFT"));
    add_opt(common_arg(
        {"-devd", "--device-draft"}, "<dev1,dev2,..>",
        "comma-separated list of devices to use for offloading the draft model (none = don't offload)\n"
        "use --list-devices to see a list of available devices",
        [](common_params & params, const std::string & value) {
            params.speculative.devices = parse_device_list(value);
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N",
        "number of layers to store in VRAM for the draft model",
        [](common_params & params, int value) {
            params.speculative.n_gpu_layers = value;
            if (!llama_supports_gpu_offload()) {
                fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n");
                fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n");
                fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n");
            }
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_GPU_LAYERS_DRAFT"));
    add_opt(common_arg(
        {"-md", "--model-draft"}, "FNAME",
        "draft model for speculative decoding (default: unused)",
        [](common_params & params, const std::string & value) {
            params.speculative.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
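    // Usage sketch for speculative decoding; model file names are hypothetical:
    //   llama-speculative -m big-model.gguf -md small-draft.gguf \
    //       --draft-max 16 --draft-min 4 --draft-p-min 0.8 -ngld 99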
    add_opt(common_arg(
        {"-mv", "--model-vocoder"}, "FNAME",
        "vocoder model for audio generation (default: unused)",
        [](common_params & params, const std::string & value) {
            params.vocoder.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-use-guide-tokens"},
        "Use guide tokens to improve TTS word recall",
        [](common_params & params) {
            params.vocoder.use_guide_tokens = true;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--tts-speaker-file"}, "FNAME",
        "speaker file path for audio generation",
        [](common_params & params, const std::string & value) {
            params.vocoder.speaker_file = value;
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));
    // model-specific
    add_opt(common_arg(
        {"--tts-oute-default"},
        string_format("use default OuteTTS models (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "OuteAI/OuteTTS-0.2-500M-GGUF";
            params.model.hf_file = "OuteTTS-0.2-500M-Q8_0.gguf";
            params.vocoder.model.hf_repo = "ggml-org/WavTokenizer";
            params.vocoder.model.hf_file = "WavTokenizer-Large-75-F16.gguf";
        }
    ).set_examples({LLAMA_EXAMPLE_TTS}));
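    // Usage sketch: the preset above selects the model pair itself, so a minimal
    // run needs only a prompt and an output file (values are arbitrary):
    //   llama-tts --tts-oute-default -p "Hello world" -o output.wav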
    add_opt(common_arg(
        {"--embd-bge-small-en-default"},
        string_format("use default bge-small-en-v1.5 model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/bge-small-en-v1.5-Q8_0-GGUF";
            params.model.hf_file = "bge-small-en-v1.5-q8_0.gguf";
            params.pooling_type = LLAMA_POOLING_TYPE_NONE;
            params.embd_normalize = 2;
            params.n_ctx = 512;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--embd-e5-small-en-default"},
        string_format("use default e5-small-v2 model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/e5-small-v2-Q8_0-GGUF";
            params.model.hf_file = "e5-small-v2-q8_0.gguf";
            params.pooling_type = LLAMA_POOLING_TYPE_NONE;
            params.embd_normalize = 2;
            params.n_ctx = 512;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--embd-gte-small-default"},
        string_format("use default gte-small model (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/gte-small-Q8_0-GGUF";
            params.model.hf_file = "gte-small-q8_0.gguf";
            params.pooling_type = LLAMA_POOLING_TYPE_NONE;
            params.embd_normalize = 2;
            params.n_ctx = 512;
            params.verbose_prompt = true;
            params.embedding = true;
        }
    ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-1.5b-default"},
        string_format("use default Qwen 2.5 Coder 1.5B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-3b-default"},
        string_format("use default Qwen 2.5 Coder 3B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-default"},
        string_format("use default Qwen 2.5 Coder 7B (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-7b-spec"},
        string_format("use Qwen 2.5 Coder 7B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.speculative.n_gpu_layers = 99;
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
    add_opt(common_arg(
        {"--fim-qwen-14b-spec"},
        string_format("use Qwen 2.5 Coder 14B + 0.5B draft for speculative decoding (note: can download weights from the internet)"),
        [](common_params & params) {
            params.model.hf_repo = "ggml-org/Qwen2.5-Coder-14B-Q8_0-GGUF";
            params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
            params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
            params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
            params.speculative.n_gpu_layers = 99;
            params.port = 8012;
            params.n_gpu_layers = 99;
            params.flash_attn = true;
            params.n_ubatch = 1024;
            params.n_batch = 1024;
            params.n_ctx = 0;
            params.n_cache_reuse = 256;
        }
    ).set_examples({LLAMA_EXAMPLE_SERVER}));
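    // Usage sketch: each preset above is self-contained (model, port 8012, offload
    // and batching parameters), so a local FIM server needs no extra flags:
    //   llama-server --fim-qwen-1.5b-default
    //   llama-server --fim-qwen-7b-spec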
    return ctx_arg;
}