server.cpp

  1. #include "common.h"
  2. #include "llama.h"
  3. #include "grammar-parser.h"
  4. #include "utils.hpp"
  5. #include "oai.hpp"
  6. #include "../llava/clip.h"
  7. #include "stb_image.h"
  8. #ifndef NDEBUG
  9. // crash the server in debug mode, otherwise send an http 500 error
  10. #define CPPHTTPLIB_NO_EXCEPTIONS 1
  11. #endif
  12. // increase max payload length to allow use of larger context size
  13. #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
  14. #include "httplib.h"
  15. #include "json.hpp"
  16. // auto generated files (update with ./deps.sh)
  17. #include "index.html.hpp"
  18. #include "index.js.hpp"
  19. #include "completion.js.hpp"
  20. #include "json-schema-to-grammar.mjs.hpp"
  21. #include <cstddef>
  22. #include <thread>
  23. #include <chrono>
  24. #include <condition_variable>
  25. #include <atomic>
  26. using json = nlohmann::json;
  27. struct server_params
  28. {
  29. std::string hostname = "127.0.0.1";
  30. std::vector<std::string> api_keys;
  31. std::string public_path = "examples/server/public";
  32. int32_t port = 8080;
  33. int32_t read_timeout = 600;
  34. int32_t write_timeout = 600;
  35. };
  36. bool server_verbose = false;
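// return the length of the longest common prefix of two token sequences (used to reuse cached prompt tokens)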
  37. static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
  38. {
  39. size_t i;
  40. for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
  41. {
  42. }
  43. return i;
  44. }
  45. enum stop_type
  46. {
  47. STOP_FULL,
  48. STOP_PARTIAL,
  49. };
  50. static bool ends_with(const std::string &str, const std::string &suffix)
  51. {
  52. return str.size() >= suffix.size() &&
  53. 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
  54. }
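// if the end of `text` matches a prefix of `stop`, return the position where that partial match begins; std::string::npos otherwise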
  55. static size_t find_partial_stop_string(const std::string &stop,
  56. const std::string &text)
  57. {
  58. if (!text.empty() && !stop.empty())
  59. {
  60. const char text_last_char = text.back();
  61. for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
  62. {
  63. if (stop[char_index] == text_last_char)
  64. {
  65. const std::string current_partial = stop.substr(0, char_index + 1);
  66. if (ends_with(text, current_partial))
  67. {
  68. return text.size() - char_index - 1;
  69. }
  70. }
  71. }
  72. }
  73. return std::string::npos;
  74. }
  75. // TODO: reuse llama_detokenize
  76. template <class Iter>
  77. static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
  78. {
  79. std::string ret;
  80. for (; begin != end; ++begin)
  81. {
  82. ret += llama_token_to_piece(ctx, *begin);
  83. }
  84. return ret;
  85. }
  86. // format incomplete utf-8 multibyte character for output
  87. static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
  88. {
  89. std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
  90. // if the size is 1 and the high bit is set, the token is a single byte of a partial (multi-byte) UTF-8 character
  91. // (size > 1 means it is already a complete token piece)
  92. if (out.size() == 1 && (out[0] & 0x80) == 0x80)
  93. {
  94. std::stringstream ss;
  95. ss << std::hex << (out[0] & 0xff);
  96. std::string res(ss.str());
  97. out = "byte: \\x" + res;
  98. }
  99. return out;
  100. }
  101. // convert a vector of completion_token_output to json
  102. static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
  103. {
  104. json out = json::array();
  105. for (const auto &prob : probs)
  106. {
  107. json probs_for_token = json::array();
  108. for (const auto &p : prob.probs)
  109. {
  110. std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
  111. probs_for_token.push_back(json
  112. {
  113. {"tok_str", tok_str},
  114. {"prob", p.prob},
  115. });
  116. }
  117. std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
  118. out.push_back(json{
  119. {"content", tok_str},
  120. {"probs", probs_for_token},
  121. });
  122. }
  123. return out;
  124. }
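// per-client state: each slot owns a share of the context window plus its own sampling state and generation buffers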
  125. struct llama_client_slot
  126. {
  127. int id;
  128. int task_id = -1;
  129. struct slot_params params;
  130. slot_state state = IDLE;
  131. slot_command command = NONE;
  132. // used to determine the slot that has been used the longest
  133. int64_t t_last_used = -1;
  134. // generation props
  135. int32_t n_ctx = 0; // context size per slot
  136. int32_t n_past = 0;
  137. int32_t n_decoded = 0;
  138. int32_t n_remaining = -1;
  139. int32_t i_batch = -1;
  140. int32_t num_prompt_tokens = 0;
  141. int32_t num_prompt_tokens_processed = 0;
  142. json prompt;
  143. std::string generated_text;
  144. llama_token sampled;
  145. std::vector<llama_token> cache_tokens;
  146. std::vector<completion_token_output> generated_token_probs;
  147. bool infill = false;
  148. bool embedding = false;
  149. bool has_next_token = true;
  150. bool truncated = false;
  151. bool stopped_eos = false;
  152. bool stopped_word = false;
  153. bool stopped_limit = false;
  154. bool oaicompat = false;
  155. std::string oaicompat_model;
  156. std::string stopping_word;
  157. // sampling
  158. struct llama_sampling_params sparams;
  159. llama_sampling_context *ctx_sampling = nullptr;
  160. int32_t ga_i = 0; // group-attention state
  161. int32_t ga_n = 1; // group-attention factor
  162. int32_t ga_w = 512; // group-attention width
  163. int32_t n_past_se = 0; // self-extend
  164. // multimodal
  165. std::vector<slot_image> images;
  166. // stats
  167. size_t sent_count = 0;
  168. size_t sent_token_probs_index = 0;
  169. int64_t t_start_process_prompt;
  170. int64_t t_start_genereration;
  171. double t_prompt_processing; // ms
  172. double t_token_generation; // ms
  173. // multitasks
  174. int multitask_id = -1;
  175. void reset() {
  176. num_prompt_tokens = 0;
  177. generated_text = "";
  178. truncated = false;
  179. stopped_eos = false;
  180. stopped_word = false;
  181. stopped_limit = false;
  182. stopping_word = "";
  183. n_past = 0;
  184. sent_count = 0;
  185. sent_token_probs_index = 0;
  186. infill = false;
  187. ga_i = 0;
  188. n_past_se = 0;
  189. generated_token_probs.clear();
  190. for (slot_image & img : images)
  191. {
  192. free(img.image_embedding);
  193. if (img.img_data) {
  194. clip_image_u8_free(img.img_data);
  195. }
  196. img.prefix_prompt = "";
  197. }
  198. images.clear();
  199. }
  200. bool has_budget(gpt_params &global_params) {
  201. if (params.n_predict == -1 && global_params.n_predict == -1)
  202. {
  203. return true; // limitless
  204. }
  205. n_remaining = -1;
  206. if (params.n_predict != -1)
  207. {
  208. n_remaining = params.n_predict - n_decoded;
  209. }
  210. else if (global_params.n_predict != -1)
  211. {
  212. n_remaining = global_params.n_predict - n_decoded;
  213. }
  214. return n_remaining > 0; // true while there is still budget left
  215. }
  216. bool available() const {
  217. return state == IDLE && command == NONE;
  218. }
  219. bool is_processing() const {
  220. return (state == IDLE && command == LOAD_PROMPT) || state == PROCESSING;
  221. }
  222. void add_token_string(const completion_token_output &token) {
  223. if (command == RELEASE)
  224. {
  225. return;
  226. }
  227. cache_tokens.push_back(token.tok);
  228. generated_token_probs.push_back(token);
  229. }
  230. void release() {
  231. if (state == PROCESSING)
  232. {
  233. t_token_generation = (ggml_time_us() - t_start_genereration) / 1e3;
  234. command = RELEASE;
  235. }
  236. }
  237. json get_formated_timings() {
  238. return json
  239. {
  240. {"prompt_n", num_prompt_tokens_processed},
  241. {"prompt_ms", t_prompt_processing},
  242. {"prompt_per_token_ms", t_prompt_processing / num_prompt_tokens_processed},
  243. {"prompt_per_second", 1e3 / t_prompt_processing * num_prompt_tokens_processed},
  244. {"predicted_n", n_decoded},
  245. {"predicted_ms", t_token_generation},
  246. {"predicted_per_token_ms", t_token_generation / n_decoded},
  247. {"predicted_per_second", 1e3 / t_token_generation * n_decoded},
  248. };
  249. }
  250. void print_timings() const {
  251. LOG_TEE("\n");
  252. LOG_TEE("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
  253. __func__, t_prompt_processing, num_prompt_tokens_processed, t_prompt_processing / num_prompt_tokens_processed, 1e3 / t_prompt_processing * num_prompt_tokens_processed);
  254. LOG_TEE("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
  255. __func__, t_token_generation, n_decoded,t_token_generation / n_decoded, 1e3 / t_token_generation * n_decoded);
  256. LOG_TEE("%s: total time = %10.2f ms\n", __func__, t_prompt_processing + t_token_generation);
  257. }
  258. };
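// global server state: the model, the shared llama_context, all client slots, and the task/result queues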
  259. struct llama_server_context
  260. {
  261. llama_model *model = nullptr;
  262. llama_context *ctx = nullptr;
  263. clip_ctx *clp_ctx = nullptr;
  264. gpt_params params;
  265. llama_batch batch;
  266. bool multimodal = false;
  267. bool clean_kv_cache = true;
  268. bool all_slots_are_idle = false;
  269. bool add_bos_token = true;
  270. int32_t n_ctx; // total context for all clients / slots
  271. // system prompt
  272. bool system_need_update = false;
  273. std::string system_prompt;
  274. std::vector<llama_token> system_tokens;
  275. std::string name_user; // this should be the antiprompt
  276. std::string name_assistant;
  277. // slots / clients
  278. std::vector<llama_client_slot> slots;
  279. llama_server_queue queue_tasks;
  280. llama_server_response queue_results;
  281. ~llama_server_context()
  282. {
  283. if (ctx)
  284. {
  285. llama_free(ctx);
  286. ctx = nullptr;
  287. }
  288. if (model)
  289. {
  290. llama_free_model(model);
  291. model = nullptr;
  292. }
  293. }
  294. bool load_model(const gpt_params &params_)
  295. {
  296. params = params_;
  297. if (!params.mmproj.empty()) {
  298. multimodal = true;
  299. LOG_TEE("Multi Modal Mode Enabled");
  300. clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
  301. if(clp_ctx == nullptr) {
  302. LOG_ERROR("unable to load clip model", {{"model", params.mmproj}});
  303. return false;
  304. }
  305. if (params.n_ctx < 2048) { // request larger context for the image embedding
  306. params.n_ctx = 2048;
  307. }
  308. }
  309. std::tie(model, ctx) = llama_init_from_gpt_params(params);
  310. if (model == nullptr)
  311. {
  312. LOG_ERROR("unable to load model", {{"model", params.model}});
  313. return false;
  314. }
  315. if (multimodal) {
  316. const int n_embd_clip = clip_n_mmproj_embd(clp_ctx);
  317. const int n_embd_llm = llama_n_embd(model);
  318. if (n_embd_clip != n_embd_llm) {
  319. LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_embd_clip, n_embd_llm);
  320. llama_free(ctx);
  321. llama_free_model(model);
  322. return false;
  323. }
  324. }
  325. n_ctx = llama_n_ctx(ctx);
  326. add_bos_token = llama_should_add_bos_token(model);
  327. return true;
  328. }
  329. void initialize() {
  330. // create slots
  331. all_slots_are_idle = true;
  332. const int32_t n_ctx_slot = n_ctx / params.n_parallel;
  333. LOG_TEE("Available slots:\n");
  334. for (int i = 0; i < params.n_parallel; i++)
  335. {
  336. llama_client_slot slot;
  337. slot.id = i;
  338. slot.n_ctx = n_ctx_slot;
  339. LOG_TEE(" -> Slot %i - max context: %i\n", slot.id, n_ctx_slot);
  340. const int ga_n = params.grp_attn_n;
  341. const int ga_w = params.grp_attn_w;
  342. if (ga_n != 1) {
  343. GGML_ASSERT(ga_n > 0 && "ga_n must be positive"); // NOLINT
  344. GGML_ASSERT(ga_w % ga_n == 0 && "ga_w must be a multiple of ga_n"); // NOLINT
  345. //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of ga_w"); // NOLINT
  346. //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
  347. LOG_TEE(" -> Slot %i - self-extend: ga_n = %d, ga_w = %d\n", slot.id, ga_n, ga_w);
  348. }
  349. slot.ga_i = 0;
  350. slot.ga_n = ga_n;
  351. slot.ga_w = ga_w;
  352. slot.reset();
  353. slots.push_back(slot);
  354. }
  355. batch = llama_batch_init(n_ctx, 0, params.n_parallel);
  356. // empty system prompt
  357. system_prompt = "";
  358. system_tokens.clear();
  359. }
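// tokenize a JSON prompt, which may be a plain string or a mixed array of strings and token ids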
  360. std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
  361. {
  362. // TODO: currently, we tokenize using special tokens by default
  363. // this is not always correct (see https://github.com/ggerganov/llama.cpp/pull/4160#issuecomment-1824826216)
  364. // but it's better compared to completely ignoring ChatML and other chat templates
  365. const bool TMP_FORCE_SPECIAL = true;
  366. // If `add_bos` is true, we only add BOS, when json_prompt is a string,
  367. // or the first element of the json_prompt array is a string.
  368. std::vector<llama_token> prompt_tokens;
  369. if (json_prompt.is_array())
  370. {
  371. bool first = true;
  372. for (const auto& p : json_prompt)
  373. {
  374. if (p.is_string())
  375. {
  376. auto s = p.template get<std::string>();
  377. std::vector<llama_token> p;
  378. if (first)
  379. {
  380. p = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
  381. first = false;
  382. }
  383. else
  384. {
  385. p = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
  386. }
  387. prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
  388. }
  389. else
  390. {
  391. if (first)
  392. {
  393. first = false;
  394. }
  395. prompt_tokens.push_back(p.template get<llama_token>());
  396. }
  397. }
  398. }
  399. else
  400. {
  401. auto s = json_prompt.template get<std::string>();
  402. prompt_tokens = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
  403. }
  404. return prompt_tokens;
  405. }
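// return the slot with the given id if it is free; otherwise fall back to the least recently used available slot (nullptr if none)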
  406. llama_client_slot* get_slot(int id) {
  407. int64_t t_last = ggml_time_us();
  408. llama_client_slot *last_used = nullptr;
  409. for (llama_client_slot & slot : slots)
  410. {
  411. if (slot.id == id && slot.available())
  412. {
  413. return &slot;
  414. }
  415. if (slot.available() && slot.t_last_used < t_last)
  416. {
  417. last_used = &slot;
  418. t_last = slot.t_last_used;
  419. }
  420. }
  421. return last_used;
  422. }
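// fill a slot's sampling and generation parameters (stop words, logit bias, attached images, ...) from the JSON request body and mark it for prompt loading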
  423. bool launch_slot_with_data(llama_client_slot* &slot, json data) {
  424. slot_params default_params;
  425. llama_sampling_params default_sparams;
  426. if (data.count("__oaicompat") != 0) {
  427. slot->oaicompat = true;
  428. slot->oaicompat_model = json_value(data, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
  429. } else {
  430. slot->oaicompat = false;
  431. slot->oaicompat_model = "";
  432. }
  433. slot->params.stream = json_value(data, "stream", false);
  434. slot->params.cache_prompt = json_value(data, "cache_prompt", false);
  435. slot->params.n_predict = json_value(data, "n_predict", default_params.n_predict);
  436. slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k);
  437. slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p);
  438. slot->sparams.min_p = json_value(data, "min_p", default_sparams.min_p);
  439. slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z);
  440. slot->sparams.typical_p = json_value(data, "typical_p", default_sparams.typical_p);
  441. slot->sparams.temp = json_value(data, "temperature", default_sparams.temp);
  442. slot->sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n);
  443. slot->sparams.penalty_repeat = json_value(data, "repeat_penalty", default_sparams.penalty_repeat);
  444. slot->sparams.penalty_freq = json_value(data, "frequency_penalty", default_sparams.penalty_freq);
  445. slot->sparams.penalty_present = json_value(data, "presence_penalty", default_sparams.penalty_present);
  446. slot->sparams.mirostat = json_value(data, "mirostat", default_sparams.mirostat);
  447. slot->sparams.mirostat_tau = json_value(data, "mirostat_tau", default_sparams.mirostat_tau);
  448. slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
  449. slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
  450. slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep);
  451. slot->params.seed = json_value(data, "seed", default_params.seed);
  452. slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
  453. slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
  454. // infill
  455. if (data.count("input_prefix") != 0)
  456. {
  457. slot->params.input_prefix = data["input_prefix"];
  458. }
  459. else
  460. {
  461. slot->params.input_prefix = "";
  462. }
  463. if (data.count("input_suffix") != 0)
  464. {
  465. slot->params.input_suffix = data["input_suffix"];
  466. }
  467. else
  468. {
  469. slot->params.input_suffix = "";
  470. }
  471. if (data.count("prompt") != 0)
  472. {
  473. slot->prompt = data["prompt"];
  474. }
  475. else
  476. {
  477. slot->prompt = "";
  478. }
  479. slot->sparams.penalty_prompt_tokens.clear();
  480. slot->sparams.use_penalty_prompt_tokens = false;
  481. const auto &penalty_prompt = data.find("penalty_prompt");
  482. if (penalty_prompt != data.end())
  483. {
  484. if (penalty_prompt->is_string())
  485. {
  486. const auto penalty_prompt_string = penalty_prompt->get<std::string>();
  487. auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false);
  488. slot->sparams.penalty_prompt_tokens.swap(penalty_tokens);
  489. if (slot->params.n_predict > 0)
  490. {
  491. slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict);
  492. }
  493. slot->sparams.use_penalty_prompt_tokens = true;
  494. }
  495. else if (penalty_prompt->is_array())
  496. {
  497. const auto n_tokens = penalty_prompt->size();
  498. slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict));
  499. const int n_vocab = llama_n_vocab(model);
  500. for (const auto &penalty_token : *penalty_prompt)
  501. {
  502. if (penalty_token.is_number_integer())
  503. {
  504. const auto tok = penalty_token.get<llama_token>();
  505. if (tok >= 0 && tok < n_vocab)
  506. {
  507. slot->sparams.penalty_prompt_tokens.push_back(tok);
  508. }
  509. }
  510. }
  511. slot->sparams.use_penalty_prompt_tokens = true;
  512. }
  513. }
  514. slot->sparams.logit_bias.clear();
  515. if (json_value(data, "ignore_eos", false))
  516. {
  517. slot->sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
  518. }
  519. const auto &logit_bias = data.find("logit_bias");
  520. if (logit_bias != data.end() && logit_bias->is_array())
  521. {
  522. const int n_vocab = llama_n_vocab(model);
  523. for (const auto &el : *logit_bias)
  524. {
  525. if (el.is_array() && el.size() == 2 && el[0].is_number_integer())
  526. {
  527. llama_token tok = el[0].get<llama_token>();
  528. if (tok >= 0 && tok < n_vocab)
  529. {
  530. if (el[1].is_number())
  531. {
  532. slot->sparams.logit_bias[tok] = el[1].get<float>();
  533. }
  534. else if (el[1].is_boolean() && !el[1].get<bool>())
  535. {
  536. slot->sparams.logit_bias[tok] = -INFINITY;
  537. }
  538. }
  539. }
  540. }
  541. }
  542. slot->params.antiprompt.clear();
  543. const auto &stop = data.find("stop");
  544. if (stop != data.end() && stop->is_array())
  545. {
  546. for (const auto &word : *stop)
  547. {
  548. if (!word.empty())
  549. {
  550. slot->params.antiprompt.push_back(word);
  551. }
  552. }
  553. }
  554. if (multimodal)
  555. {
  556. const auto &images_data = data.find("image_data");
  557. if (images_data != data.end() && images_data->is_array())
  558. {
  559. for (const auto &img : *images_data)
  560. {
  561. const std::vector<uint8_t> image_buffer = base64_decode(img["data"].get<std::string>());
  562. slot_image img_sl;
  563. img_sl.id = img.count("id") != 0 ? img["id"].get<int>() : slot->images.size();
  564. img_sl.img_data = clip_image_u8_init();
  565. if (!clip_image_load_from_bytes(image_buffer.data(), image_buffer.size(), img_sl.img_data))
  566. {
  567. LOG_TEE("slot %i - failed to load image [id: %i]\n", slot->id, img_sl.id);
  568. return false;
  569. }
  570. LOG_TEE("slot %i - loaded image\n", slot->id);
  571. img_sl.request_encode_image = true;
  572. slot->images.push_back(img_sl);
  573. }
  574. // process prompt
  575. // example: system prompt [img-102] user [img-103] describe [img-134] -> [{id: 102, prefix: 'system prompt '}, {id: 103, prefix: ' user '}, {id: 134, prefix: ' describe '}]}
  576. if (slot->images.size() > 0 && !slot->prompt.is_array())
  577. {
  578. std::string prompt = slot->prompt.get<std::string>();
  579. size_t pos = 0, begin_prefix = 0;
  580. std::string pattern = "[img-";
  581. while ((pos = prompt.find(pattern, pos)) != std::string::npos) {
  582. size_t end_prefix = pos;
  583. pos += pattern.length();
  584. size_t end_pos = prompt.find(']', pos);
  585. if (end_pos != std::string::npos)
  586. {
  587. std::string image_id = prompt.substr(pos, end_pos - pos);
  588. try
  589. {
  590. int img_id = std::stoi(image_id);
  591. bool found = false;
  592. for (slot_image &img : slot->images)
  593. {
  594. if (img.id == img_id) {
  595. found = true;
  596. img.prefix_prompt = prompt.substr(begin_prefix, end_prefix - begin_prefix);
  597. begin_prefix = end_pos + 1;
  598. break;
  599. }
  600. }
  601. if (!found) {
  602. LOG_TEE("ERROR: Image with id: %i, not found.\n", img_id);
  603. slot->images.clear();
  604. return false;
  605. }
  606. } catch (const std::invalid_argument& e) {
  607. LOG_TEE("Invalid image number id in prompt\n");
  608. slot->images.clear();
  609. return false;
  610. }
  611. }
  612. }
  613. slot->prompt = "";
  614. slot->params.input_suffix = prompt.substr(begin_prefix);
  615. slot->params.cache_prompt = false; // multimodal doesn't support cache prompt
  616. }
  617. }
  618. }
  619. if (slot->ctx_sampling != nullptr)
  620. {
  621. llama_sampling_free(slot->ctx_sampling);
  622. }
  623. slot->ctx_sampling = llama_sampling_init(slot->sparams);
  624. llama_set_rng_seed(ctx, slot->params.seed);
  625. slot->command = LOAD_PROMPT;
  626. all_slots_are_idle = false;
  627. LOG_TEE("slot %i is processing [task id: %i]\n", slot->id, slot->task_id);
  628. return true;
  629. }
  630. void kv_cache_clear() {
  631. // clear the entire KV cache
  632. llama_kv_cache_clear(ctx);
  633. clean_kv_cache = false;
  634. }
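// re-tokenize and evaluate the system prompt, then copy its KV cache entries to every parallel sequence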
  635. void update_system_prompt() {
  636. system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);
  637. llama_batch_clear(batch);
  638. kv_cache_clear();
  639. for (int i = 0; i < (int) system_tokens.size(); ++i)
  640. {
  641. llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
  642. }
  643. if (llama_decode(ctx, batch) != 0)
  644. {
  645. LOG_TEE("%s: llama_decode() failed\n", __func__);
  646. return;
  647. }
  648. // assign the system KV cache to all parallel sequences
  649. for (int32_t i = 1; i < params.n_parallel; ++i)
  650. {
  651. llama_kv_cache_seq_cp(ctx, 0, i, 0, system_tokens.size());
  652. }
  653. LOG_TEE("system prompt updated\n");
  654. system_need_update = false;
  655. }
  656. void notify_system_prompt_changed() {
  657. // release all slots
  658. for (llama_client_slot &slot : slots)
  659. {
  660. slot.release();
  661. }
  662. system_need_update = true;
  663. }
  664. void process_system_prompt_data(const json &sys_props) {
  665. system_prompt = sys_props.value("prompt", "");
  666. name_user = sys_props.value("anti_prompt", "");
  667. name_assistant = sys_props.value("assistant_name", "");
  668. if (slots.size() > 0)
  669. {
  670. notify_system_prompt_changed();
  671. }
  672. }
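// search the generated text for full or partial matches of the configured stop words; return the earliest match position or npos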
  673. static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
  674. const stop_type type, llama_client_slot &slot)
  675. {
  676. size_t stop_pos = std::string::npos;
  677. for (const std::string &word : slot.params.antiprompt)
  678. {
  679. size_t pos;
  680. if (type == STOP_FULL)
  681. {
  682. const size_t tmp = word.size() + last_token_size;
  683. const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
  684. pos = text.find(word, from_pos);
  685. }
  686. else
  687. {
  688. pos = find_partial_stop_string(word, text);
  689. }
  690. if (pos != std::string::npos &&
  691. (stop_pos == std::string::npos || pos < stop_pos))
  692. {
  693. if (type == STOP_FULL)
  694. {
  695. slot.stopped_word = true;
  696. slot.stopping_word = word;
  697. slot.has_next_token = false;
  698. }
  699. stop_pos = pos;
  700. }
  701. }
  702. return stop_pos;
  703. }
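// append a freshly sampled token to the slot: handle UTF-8 boundaries, stop strings and budget limits, and stream a partial response if requested; returns false when generation should stop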
  704. bool process_token(completion_token_output &result, llama_client_slot &slot) {
  705. // remember which tokens were sampled - used for repetition penalties during sampling
  706. const std::string token_str = llama_token_to_piece(ctx, result.tok);
  707. slot.sampled = result.tok;
  708. // search stop word and delete it
  709. slot.generated_text += token_str;
  710. slot.has_next_token = true;
  711. if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1)
  712. {
  713. // we can change penalty_prompt_tokens because it is always created from scratch for each request
  714. slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
  715. }
  716. // check if there is an incomplete UTF-8 character at the end
  717. bool incomplete = false;
  718. for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i)
  719. {
  720. unsigned char c = slot.generated_text[slot.generated_text.size() - i];
  721. if ((c & 0xC0) == 0x80)
  722. {
  723. // continuation byte: 10xxxxxx
  724. continue;
  725. }
  726. if ((c & 0xE0) == 0xC0)
  727. {
  728. // 2-byte character: 110xxxxx ...
  729. incomplete = i < 2;
  730. }
  731. else if ((c & 0xF0) == 0xE0)
  732. {
  733. // 3-byte character: 1110xxxx ...
  734. incomplete = i < 3;
  735. }
  736. else if ((c & 0xF8) == 0xF0)
  737. {
  738. // 4-byte character: 11110xxx ...
  739. incomplete = i < 4;
  740. }
  741. // else 1-byte character or invalid byte
  742. break;
  743. }
  744. if (!incomplete)
  745. {
  746. size_t pos = std::min(slot.sent_count, slot.generated_text.size());
  747. const std::string str_test = slot.generated_text.substr(pos);
  748. bool is_stop_full = false;
  749. size_t stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_FULL, slot);
  750. if (stop_pos != std::string::npos)
  751. {
  752. is_stop_full = true;
  753. slot.generated_text.erase(
  754. slot.generated_text.begin() + pos + stop_pos,
  755. slot.generated_text.end());
  756. pos = std::min(slot.sent_count, slot.generated_text.size());
  757. }
  758. else
  759. {
  760. is_stop_full = false;
  761. stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot);
  762. }
  763. // check if there is any token to predict
  764. if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0))
  765. {
  766. // do not send the stop word in the response
  767. result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
  768. slot.sent_count += result.text_to_send.size();
  769. // add the token to slot queue and cache
  770. }
  771. slot.add_token_string(result);
  772. if (slot.params.stream)
  773. {
  774. send_partial_response(slot, result);
  775. }
  776. }
  777. if (incomplete)
  778. {
  779. slot.has_next_token = true;
  780. }
  781. // check the limits
  782. if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params))
  783. {
  784. slot.stopped_limit = true;
  785. slot.has_next_token = false;
  786. }
  787. if (!slot.cache_tokens.empty() && result.tok == llama_token_eos(model))
  788. {
  789. slot.stopped_eos = true;
  790. slot.has_next_token = false;
  791. LOG_VERBOSE("eos token found", {});
  792. }
  793. LOG_VERBOSE("next token", {
  794. {"token", result.tok},
  795. {"token_text", tokens_to_output_formatted_string(ctx, result.tok)},
  796. {"has_next_token", slot.has_next_token},
  797. {"n_remain", slot.n_remaining},
  798. {"num_tokens_predicted", slot.n_decoded},
  799. {"stopped_eos", slot.stopped_eos},
  800. {"stopped_word", slot.stopped_word},
  801. {"stopped_limit", slot.stopped_limit},
  802. {"stopping_word", slot.stopping_word},
  803. });
  804. return slot.has_next_token; // continue
  805. }
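// preprocess and encode any pending images with the CLIP model; returns true if the slot has at least one image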
  806. bool process_images(llama_client_slot &slot) const
  807. {
  808. for (slot_image &img : slot.images)
  809. {
  810. if (!img.request_encode_image)
  811. {
  812. continue;
  813. }
  814. clip_image_f32 * img_res = clip_image_f32_init();
  815. if (!clip_image_preprocess(clp_ctx, img.img_data, img_res, /*pad2square =*/ true))
  816. {
  817. LOG_TEE("Error processing the given image");
  818. clip_free(clp_ctx);
  819. return false;
  820. }
  821. img.image_tokens = clip_n_patches(clp_ctx);
  822. img.image_embedding = (float *)malloc(clip_embd_nbytes(clp_ctx));
  823. if (!img.image_embedding)
  824. {
  825. LOG_TEE("Unable to allocate memory for image embeddings\n");
  826. clip_free(clp_ctx);
  827. return false;
  828. }
  829. LOG_TEE("slot %i - encoding image [id: %i]\n", slot.id, img.id);
  830. if (!clip_image_encode(clp_ctx, params.n_threads, img_res, img.image_embedding))
  831. {
  832. LOG_TEE("Unable to encode image\n");
  833. return false;
  834. }
  835. clip_image_f32_free(img_res);
  836. img.request_encode_image = false;
  837. }
  838. return slot.images.size() > 0;
  839. }
  840. void send_error(task_server& task, const std::string &error)
  841. {
  842. LOG_TEE("task %i - error: %s\n", task.id, error.c_str());
  843. task_result res;
  844. res.id = task.id;
  845. res.multitask_id = task.multitask_id;
  846. res.stop = false;
  847. res.error = true;
  848. res.result_json = { { "content", error } };
  849. queue_results.send(res);
  850. }
  851. json get_model_props()
  852. {
  853. return get_formated_generation(slots[0]);
  854. }
  855. json get_formated_generation(llama_client_slot &slot)
  856. {
  857. const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
  858. const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() &&
  859. eos_bias->second < 0.0f && std::isinf(eos_bias->second);
  860. return json {
  861. {"n_ctx", slot.n_ctx},
  862. {"model", params.model_alias},
  863. {"seed", slot.params.seed},
  864. {"temperature", slot.sparams.temp},
  865. {"top_k", slot.sparams.top_k},
  866. {"top_p", slot.sparams.top_p},
  867. {"min_p", slot.sparams.min_p},
  868. {"tfs_z", slot.sparams.tfs_z},
  869. {"typical_p", slot.sparams.typical_p},
  870. {"repeat_last_n", slot.sparams.penalty_last_n},
  871. {"repeat_penalty", slot.sparams.penalty_repeat},
  872. {"presence_penalty", slot.sparams.penalty_present},
  873. {"frequency_penalty", slot.sparams.penalty_freq},
  874. {"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens},
  875. {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
  876. {"mirostat", slot.sparams.mirostat},
  877. {"mirostat_tau", slot.sparams.mirostat_tau},
  878. {"mirostat_eta", slot.sparams.mirostat_eta},
  879. {"penalize_nl", slot.sparams.penalize_nl},
  880. {"stop", slot.params.antiprompt},
  881. {"n_predict", slot.params.n_predict},
  882. {"n_keep", params.n_keep},
  883. {"ignore_eos", ignore_eos},
  884. {"stream", slot.params.stream},
  885. {"logit_bias", slot.sparams.logit_bias},
  886. {"n_probs", slot.sparams.n_probs},
  887. {"grammar", slot.sparams.grammar},
  888. };
  889. }
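// push a streaming chunk for the latest token (optionally with per-token probabilities) to the result queue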
  890. void send_partial_response(llama_client_slot &slot, completion_token_output tkn)
  891. {
  892. task_result res;
  893. res.id = slot.task_id;
  894. res.multitask_id = slot.multitask_id;
  895. res.error = false;
  896. res.stop = false;
  897. res.result_json = json
  898. {
  899. {"content", tkn.text_to_send},
  900. {"stop", false},
  901. {"slot_id", slot.id},
  902. {"multimodal", multimodal}
  903. };
  904. if (slot.sparams.n_probs > 0)
  905. {
  906. std::vector<completion_token_output> probs_output = {};
  907. const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
  908. size_t probs_pos = std::min(slot.sent_token_probs_index, slot.generated_token_probs.size());
  909. size_t probs_stop_pos = std::min(slot.sent_token_probs_index + to_send_toks.size(), slot.generated_token_probs.size());
  910. if (probs_pos < probs_stop_pos)
  911. {
  912. probs_output = std::vector<completion_token_output>(slot.generated_token_probs.begin() + probs_pos, slot.generated_token_probs.begin() + probs_stop_pos);
  913. }
  914. slot.sent_token_probs_index = probs_stop_pos;
  915. res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
  916. }
  917. if (slot.oaicompat)
  918. {
  919. res.result_json["oaicompat_token_ctr"] = slot.n_decoded;
  920. res.result_json["model"] = slot.oaicompat_model;
  921. }
  922. queue_results.send(res);
  923. }
  924. void send_final_response(llama_client_slot &slot)
  925. {
  926. task_result res;
  927. res.id = slot.task_id;
  928. res.multitask_id = slot.multitask_id;
  929. res.error = false;
  930. res.stop = true;
  931. res.result_json = json
  932. {
  933. {"content", !slot.params.stream ? slot.generated_text : ""},
  934. {"slot_id", slot.id},
  935. {"stop", true},
  936. {"model", params.model_alias},
  937. {"tokens_predicted", slot.n_decoded},
  938. {"tokens_evaluated", slot.num_prompt_tokens},
  939. {"generation_settings", get_formated_generation(slot)},
  940. {"prompt", slot.prompt},
  941. {"truncated", slot.truncated},
  942. {"stopped_eos", slot.stopped_eos},
  943. {"stopped_word", slot.stopped_word},
  944. {"stopped_limit", slot.stopped_limit},
  945. {"stopping_word", slot.stopping_word},
  946. {"tokens_cached", slot.n_past},
  947. {"timings", slot.get_formated_timings()}
  948. };
  949. if (slot.sparams.n_probs > 0)
  950. {
  951. std::vector<completion_token_output> probs = {};
  952. if (!slot.params.stream && slot.stopped_word)
  953. {
  954. const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
  955. probs = std::vector<completion_token_output>(slot.generated_token_probs.begin(), slot.generated_token_probs.end() - stop_word_toks.size());
  956. }
  957. else
  958. {
  959. probs = std::vector<completion_token_output>(
  960. slot.generated_token_probs.begin(),
  961. slot.generated_token_probs.end());
  962. }
  963. res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
  964. }
  965. if (slot.oaicompat)
  966. {
  967. res.result_json["oaicompat_token_ctr"] = slot.n_decoded;
  968. res.result_json["model"] = slot.oaicompat_model;
  969. }
  970. queue_results.send(res);
  971. }
  972. void send_embedding(llama_client_slot &slot)
  973. {
  974. task_result res;
  975. res.id = slot.task_id;
  976. res.multitask_id = slot.multitask_id;
  977. res.error = false;
  978. res.stop = true;
  979. const int n_embd = llama_n_embd(model);
  980. if (!params.embedding)
  981. {
  982. LOG_WARNING("embedding disabled", {
  983. {"params.embedding", params.embedding},
  984. });
  985. res.result_json = json
  986. {
  987. {"embedding", std::vector<float>(n_embd, 0.0f)},
  988. };
  989. }
  990. else
  991. {
  992. const float *data = llama_get_embeddings(ctx);
  993. std::vector<float> embedding(data, data + n_embd);
  994. res.result_json = json
  995. {
  996. {"embedding", embedding },
  997. };
  998. }
  999. queue_results.send(res);
  1000. }
  1001. void request_completion(int task_id, json data, bool infill, bool embedding, int multitask_id)
  1002. {
  1003. task_server task;
  1004. task.id = task_id;
  1005. task.target_id = 0;
  1006. task.data = std::move(data);
  1007. task.infill_mode = infill;
  1008. task.embedding_mode = embedding;
  1009. task.type = TASK_TYPE_COMPLETION;
  1010. task.multitask_id = multitask_id;
  1011. // when a completion task's prompt array is not a singleton, we split it into multiple requests
  1012. if (task.data.count("prompt") && task.data.at("prompt").size() > 1)
  1013. {
  1014. split_multiprompt_task(task_id, task);
  1015. }
  1016. // otherwise, it's a single-prompt task, we actually queue it
  1017. queue_tasks.post(task);
  1018. }
  1019. // process a slot's images: evaluate each prefix prompt and its image embedding in batches
  1020. bool ingest_images(llama_client_slot &slot, int n_batch)
  1021. {
  1022. int image_idx = 0;
  1023. while (image_idx < (int) slot.images.size())
  1024. {
  1025. slot_image &img = slot.images[image_idx];
  1026. // process prefix prompt
  1027. for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
  1028. {
  1029. const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
  1030. llama_batch batch_view = {
  1031. n_tokens,
  1032. batch.token + i,
  1033. nullptr,
  1034. batch.pos + i,
  1035. batch.n_seq_id + i,
  1036. batch.seq_id + i,
  1037. batch.logits + i,
  1038. 0, 0, 0, // unused
  1039. };
  1040. if (llama_decode(ctx, batch_view))
  1041. {
  1042. LOG_TEE("%s : failed to eval\n", __func__);
  1043. return false;
  1044. }
  1045. }
  1046. // process image with llm
  1047. for (int i = 0; i < img.image_tokens; i += n_batch)
  1048. {
  1049. int n_eval = img.image_tokens - i;
  1050. if (n_eval > n_batch)
  1051. {
  1052. n_eval = n_batch;
  1053. }
  1054. const int n_embd = llama_n_embd(model);
  1055. llama_batch batch_img = { n_eval, nullptr, (img.image_embedding + i * n_embd), nullptr, nullptr, nullptr, nullptr, slot.n_past, 1, 0, };
  1056. if (llama_decode(ctx, batch_img))
  1057. {
  1058. LOG_TEE("%s : failed to eval image\n", __func__);
  1059. return false;
  1060. }
  1061. slot.n_past += n_eval;
  1062. }
  1063. image_idx++;
  1064. llama_batch_clear(batch);
  1065. // append prefix of next image
  1066. const auto json_prompt = (image_idx >= (int) slot.images.size()) ?
  1067. slot.params.input_suffix : // no more images, then process suffix prompt
  1068. (json)(slot.images[image_idx].prefix_prompt);
  1069. std::vector<llama_token> append_tokens = tokenize(json_prompt, false); // has next image
  1070. for (int i = 0; i < (int) append_tokens.size(); ++i)
  1071. {
  1072. llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, { slot.id }, true);
  1073. slot.n_past += 1;
  1074. }
  1075. }
  1076. return true;
  1077. }
  1078. void request_cancel(int task_id)
  1079. {
  1080. task_server task;
  1081. task.type = TASK_TYPE_CANCEL;
  1082. task.target_id = task_id;
  1083. queue_tasks.post(task);
  1084. }
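// split a request whose "prompt" is an array into one subtask per prompt, all tracked under a single multitask id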
  1085. void split_multiprompt_task(int multitask_id, task_server& multiprompt_task)
  1086. {
  1087. int prompt_count = multiprompt_task.data.at("prompt").size();
  1088. assert(prompt_count > 1);
  1089. // generate an id for each subtask
  1090. std::vector<int> subtask_ids(prompt_count);
  1091. for (int i = 0; i < prompt_count; i++)
  1092. {
  1093. subtask_ids[i] = queue_tasks.get_new_id();
  1094. }
  1095. // queue up the multitask so we can track its subtask progression
  1096. queue_tasks.add_multitask(multitask_id, subtask_ids);
  1097. // add subtasks
  1098. for (int i = 0; i < prompt_count; i++)
  1099. {
  1100. json subtask_data = multiprompt_task.data;
  1101. subtask_data["prompt"] = subtask_data["prompt"][i];
  1102. // subtasks inherit everything else (infill mode, embedding mode, etc.)
  1103. request_completion(subtask_ids[i], subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode, multitask_id);
  1104. }
  1105. }
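// dispatch a queued task: assign completion requests to slots, handle cancellations, and ignore next-response ticks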
  1106. void process_single_task(task_server& task)
  1107. {
  1108. switch (task.type)
  1109. {
  1110. case TASK_TYPE_COMPLETION: {
  1111. llama_client_slot *slot = get_slot(json_value(task.data, "slot_id", -1));
  1112. if (slot == nullptr)
  1113. {
  1114. // if no slot is available, we defer this task for processing later
  1115. LOG_VERBOSE("no slot is available", {});
  1116. queue_tasks.defer(task);
  1117. break;
  1118. }
  1119. if (task.data.contains("system_prompt"))
  1120. {
  1121. if (!all_slots_are_idle) {
  1122. send_error(task, "system prompt can only be updated when all slots are idle");
  1123. break;
  1124. }
  1125. process_system_prompt_data(task.data["system_prompt"]);
  1126. // reset cache_tokens for all slots
  1127. for (llama_client_slot &slot : slots)
  1128. {
  1129. slot.cache_tokens.clear();
  1130. slot.n_past = 0;
  1131. slot.n_past_se = 0;
  1132. }
  1133. }
  1134. slot->reset();
  1135. slot->infill = task.infill_mode;
  1136. slot->embedding = task.embedding_mode;
  1137. slot->task_id = task.id;
  1138. slot->multitask_id = task.multitask_id;
  1139. if (!launch_slot_with_data(slot, task.data))
  1140. {
  1141. // send error result
  1142. send_error(task, "internal_error");
  1143. break;
  1144. }
  1145. } break;
  1146. case TASK_TYPE_CANCEL: { // release slot linked with the task id
  1147. for (auto & slot : slots)
  1148. {
  1149. if (slot.task_id == task.target_id)
  1150. {
  1151. slot.release();
  1152. break;
  1153. }
  1154. }
  1155. } break;
  1156. case TASK_TYPE_NEXT_RESPONSE: {
  1157. // do nothing
  1158. } break;
  1159. }
  1160. }
  1161. void on_finish_multitask(task_multi& multitask)
  1162. {
  1163. // all subtasks done == multitask is done
  1164. task_result result;
  1165. result.id = multitask.id;
  1166. result.stop = true;
  1167. result.error = false;
  1168. // collect json results into one json result
  1169. std::vector<json> result_jsons;
  1170. for (auto& subres : multitask.results)
  1171. {
  1172. result_jsons.push_back(subres.result_json);
  1173. result.error = result.error && subres.error;
  1174. }
  1175. result.result_json = json{ { "results", result_jsons } };
  1176. queue_results.send(result);
  1177. }
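// per-iteration update: shift full contexts, batch tokens from ongoing generations and pending prompts, then decode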
  1178. bool update_slots() {
  1179. if (system_need_update)
  1180. {
  1181. LOG_TEE("updating system prompt\n");
  1182. update_system_prompt();
  1183. }
  1184. llama_batch_clear(batch);
  1185. if (all_slots_are_idle)
  1186. {
  1187. if (system_prompt.empty() && clean_kv_cache)
  1188. {
  1189. LOG_TEE("all slots are idle and system prompt is empty, clear the KV cache\n");
  1190. kv_cache_clear();
  1191. }
  1192. return true;
  1193. }
  1194. task_server task;
  1195. task.type = TASK_TYPE_NEXT_RESPONSE;
  1196. task.target_id = -1;
  1197. queue_tasks.post(task);
  1198. for (llama_client_slot &slot : slots)
  1199. {
  1200. if (slot.ga_n == 1)
  1201. {
  1202. if (slot.is_processing() && system_tokens.size() + slot.cache_tokens.size() >= (size_t) slot.n_ctx)
  1203. {
  1204. // Shift context
  1205. const int n_left = system_tokens.size() + slot.n_past - slot.params.n_keep - 1;
  1206. const int n_discard = n_left / 2;
  1207. LOG_TEE("slot %d: context shift - n_keep = %d, n_left = %d, n_discard = %d\n", slot.id, slot.params.n_keep, n_left, n_discard);
  1208. llama_kv_cache_seq_rm (ctx, slot.id, slot.params.n_keep + 1 , slot.params.n_keep + n_discard + 1);
  1209. llama_kv_cache_seq_shift(ctx, slot.id, slot.params.n_keep + 1 + n_discard, system_tokens.size() + slot.n_past, -n_discard);
  1210. for (size_t i = slot.params.n_keep + 1 + n_discard; i < slot.cache_tokens.size(); i++)
  1211. {
  1212. slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
  1213. }
  1214. slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);
  1215. slot.n_past -= n_discard;
  1216. slot.truncated = true;
  1217. LOG_VERBOSE("context shift", {
  1218. { "n_ctx", n_ctx },
  1219. { "n_keep", params.n_keep },
  1220. { "n_left", n_left },
  1221. });
  1222. }
  1223. }
  1224. }
  1225. // decode any currently ongoing sequences
  1226. for (auto & slot : slots)
  1227. {
  1228. // release the slot
  1229. if (slot.command == RELEASE)
  1230. {
  1231. slot.state = IDLE;
  1232. slot.command = NONE;
  1233. slot.t_last_used = ggml_time_us();
  1234. LOG_TEE("slot %d released (%d tokens in cache)\n", slot.id, (int) slot.cache_tokens.size());
  1235. queue_tasks.notify_slot_changed();
  1236. continue;
  1237. }
  1238. if (slot.state == IDLE)
  1239. {
  1240. continue;
  1241. }
  1242. slot.i_batch = batch.n_tokens;
  1243. const int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1244. // TODO: we always have to take into account the "system_tokens"
  1245. // this is not great and needs to be improved somehow
  1246. llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id }, true);
  1247. slot.n_past += 1;
  1248. }
  1249. // process in chunks of params.n_batch
  1250. int32_t n_batch = params.n_batch;
  1251. // assign workload to the slots
  1252. if (params.cont_batching || batch.n_tokens == 0)
  1253. {
  1254. for (auto & slot : slots)
  1255. {
  1256. const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();
  1257. // empty prompt passed -> release the slot and send empty response
  1258. // note: infill mode allows empty prompt
  1259. if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt && !slot.infill)
  1260. {
  1261. slot.release();
  1262. slot.print_timings();
  1263. send_final_response(slot);
  1264. continue;
  1265. }
  1266. // need to process the prompt
  1267. if (slot.state == IDLE && slot.command == LOAD_PROMPT)
  1268. {
  1269. slot.state = PROCESSING;
  1270. slot.command = NONE;
  1271. std::vector<llama_token> prompt_tokens;
  1272. slot.t_start_process_prompt = ggml_time_us();
  1273. slot.t_start_genereration = 0;
  1274. if (slot.infill)
  1275. {
  1276. bool suff_rm_leading_spc = true;
1277. if (slot.params.input_suffix.find_first_of(' ') == 0 && slot.params.input_suffix.size() > 1)
1278. {
1279. slot.params.input_suffix.erase(0, 1);
  1280. suff_rm_leading_spc = false;
  1281. }
  1282. auto prefix_tokens = tokenize(slot.params.input_prefix, false);
  1283. auto suffix_tokens = tokenize(slot.params.input_suffix, false);
  1284. const int space_token = 29871; // TODO: this should not be hardcoded
  1285. if (suff_rm_leading_spc && !suffix_tokens.empty() && suffix_tokens[0] == space_token) {
  1286. suffix_tokens.erase(suffix_tokens.begin());
  1287. }
  1288. prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
  1289. prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
  1290. prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
  1291. prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
  1292. prefix_tokens.push_back(llama_token_middle(model));
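// resulting infill prompt layout: <BOS> <PRE> prefix tokens <SUF> suffix tokens <MID>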
  1293. prompt_tokens = prefix_tokens;
  1294. }
  1295. else
  1296. {
1297. prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS only if there is no system prompt
  1298. }
  1299. slot.num_prompt_tokens = prompt_tokens.size();
  1300. if (slot.params.n_keep < 0)
  1301. {
  1302. slot.params.n_keep = slot.num_prompt_tokens;
  1303. }
  1304. slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);
  1305. // if input prompt is too big, truncate it
  1306. if (slot.num_prompt_tokens >= slot.n_ctx)
  1307. {
  1308. const int n_left = slot.n_ctx - slot.params.n_keep;
  1309. const int n_block_size = n_left / 2;
  1310. const int erased_blocks = (slot.num_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
  1311. std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + slot.params.n_keep);
  1312. new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size, prompt_tokens.end());
  1313. LOG_VERBOSE("input truncated", {
  1314. {"n_ctx", slot.n_ctx},
  1315. {"n_keep", slot.params.n_keep},
  1316. {"n_left", n_left},
  1317. {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
  1318. });
  1319. slot.truncated = true;
  1320. prompt_tokens = new_tokens;
  1321. slot.num_prompt_tokens = prompt_tokens.size();
  1322. GGML_ASSERT(slot.num_prompt_tokens < slot.n_ctx);
  1323. }
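// illustrative example: with n_ctx = 512, n_keep = 128 and a 1000-token prompt: n_left = 384,
// n_block_size = 192, erased_blocks = 3, so prompt tokens [128, 704) are dropped and the
// truncated prompt keeps 128 + 296 = 424 tokens, which fits in the context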
  1324. if (!slot.params.cache_prompt)
  1325. {
  1326. llama_sampling_reset(slot.ctx_sampling);
  1327. slot.n_past = 0;
  1328. slot.n_past_se = 0;
  1329. slot.ga_i = 0;
  1330. slot.num_prompt_tokens_processed = slot.num_prompt_tokens;
  1331. }
  1332. else
  1333. {
  1334. // push the prompt into the sampling context (do not apply grammar)
  1335. for (auto &token : prompt_tokens)
  1336. {
  1337. llama_sampling_accept(slot.ctx_sampling, ctx, token, false);
  1338. }
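// reuse the longest common prefix between the cached tokens and the new prompt;
// only the tokens after that prefix have to be evaluated again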
  1339. slot.n_past = common_part(slot.cache_tokens, prompt_tokens);
  1340. slot.num_prompt_tokens_processed = slot.num_prompt_tokens - slot.n_past;
  1341. if (slot.ga_n != 1)
  1342. {
  1343. int ga_i = 0;
  1344. int32_t ga_n = slot.ga_n;
  1345. int32_t ga_w = slot.ga_w;
  1346. int32_t slot_npast = 0;
  1347. for (int k = 0; k < slot.n_past; ++k)
  1348. {
  1349. while (slot_npast >= ga_i + ga_w) {
  1350. const int bd = (ga_w/ga_n)*(ga_n - 1);
  1351. slot_npast -= bd;
  1352. ga_i += ga_w/ga_n;
  1353. }
  1354. slot_npast++;
  1355. }
  1356. slot.n_past_se = slot_npast;
  1357. slot.ga_i = ga_i;
  1358. }
  1359. LOG_TEE("slot %d : in cache: %i tokens | to process: %i tokens\n", slot.id, slot.n_past, slot.num_prompt_tokens_processed);
  1360. }
  1361. LOG_TEE("slot %d : kv cache rm - [%d, end)\n", slot.id, (int) system_tokens.size() + slot.n_past);
  1362. llama_kv_cache_seq_rm(ctx, slot.id, system_tokens.size() + slot.n_past, -1);
  1363. slot.cache_tokens = prompt_tokens;
  1364. if (slot.n_past == slot.num_prompt_tokens && slot.n_past > 0)
  1365. {
  1366. // we have to evaluate at least 1 token to generate logits.
  1367. LOG_TEE("slot %d : we have to evaluate at least 1 token to generate logits\n", slot.id);
  1368. slot.n_past--;
  1369. if (slot.ga_i > 0)
  1370. {
  1371. slot.n_past_se--;
  1372. }
  1373. }
  1374. LOG_VERBOSE("prompt ingested", {
  1375. {"n_past", slot.n_past},
  1376. {"cached", tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
  1377. {"to_eval", tokens_to_str(ctx, slot.cache_tokens.cbegin() + slot.n_past, slot.cache_tokens.cend())},
  1378. });
  1379. const bool has_images = process_images(slot);
1380. // if there are images, batch only the part of the prompt before the first image; otherwise batch the whole prompt
  1381. std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, add_bos_token) : prompt_tokens;
  1382. int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1383. int32_t ga_i = slot.ga_i;
  1384. int32_t ga_n = slot.ga_n;
  1385. int32_t ga_w = slot.ga_w;
  1386. for (; slot.n_past < (int) prefix_tokens.size(); ++slot.n_past)
  1387. {
  1388. if (slot.ga_n != 1)
  1389. {
  1390. while (slot_npast >= ga_i + ga_w) {
  1391. const int bd = (ga_w/ga_n)*(ga_n - 1);
  1392. slot_npast -= bd;
  1393. ga_i += ga_w/ga_n;
  1394. }
  1395. }
  1396. llama_batch_add(batch, prefix_tokens[slot.n_past], system_tokens.size() + slot_npast, {slot.id }, false);
  1397. slot_npast++;
  1398. }
  1399. if (has_images && !ingest_images(slot, n_batch))
  1400. {
  1401. LOG_TEE("failed processing images\n");
  1402. return false;
  1403. }
  1404. // extract the logits only for the last token
  1405. if (batch.n_tokens > 0)
  1406. {
  1407. batch.logits[batch.n_tokens - 1] = true;
  1408. }
  1409. slot.n_decoded = 0;
  1410. slot.i_batch = batch.n_tokens - 1;
  1411. }
  1412. }
  1413. }
  1414. if (batch.n_tokens == 0)
  1415. {
  1416. all_slots_are_idle = true;
  1417. return true;
  1418. }
  1419. for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
  1420. {
  1421. const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
  1422. for (auto & slot : slots)
  1423. {
  1424. if (slot.ga_n != 1)
  1425. {
  1426. // context extension via Self-Extend
  1427. while (slot.n_past_se >= slot.ga_i + slot.ga_w)
  1428. {
  1429. const int ib = (slot.ga_n * slot.ga_i) / slot.ga_w;
  1430. const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1);
  1431. const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w;
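// illustrative example: with ga_n = 4, ga_w = 512, ga_i = 0 and n_past_se = 600: ib = 0, bd = 384,
// dd = -384, so positions [0, 512) are divided by 4 into [0, 128), positions [512, 600) are shifted
// down to [128, 216), and afterwards n_past_se = 216 and ga_i = 128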
  1432. LOG_TEE("\n");
  1433. LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd);
  1434. LOG_TEE("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n);
  1435. LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd);
  1436. llama_kv_cache_seq_shift(ctx, slot.id, slot.ga_i, slot.n_past_se, ib * bd);
  1437. llama_kv_cache_seq_div(ctx, slot.id, slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w,slot.ga_n);
  1438. llama_kv_cache_seq_shift(ctx, slot.id, slot.ga_i + ib * bd + slot.ga_w,slot.n_past_se + ib * bd, dd);
  1439. slot.n_past_se -= bd;
  1440. slot.ga_i += slot.ga_w / slot.ga_n;
  1441. LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i);
  1442. }
  1443. slot.n_past_se += n_tokens;
  1444. }
  1445. }
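// build a view into the batch covering tokens [i, i + n_tokens); the embd pointer is null because
// token ids (not embeddings) are being decoded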
  1446. llama_batch batch_view =
  1447. {
  1448. n_tokens,
  1449. batch.token + i,
  1450. nullptr,
  1451. batch.pos + i,
  1452. batch.n_seq_id + i,
  1453. batch.seq_id + i,
  1454. batch.logits + i,
  1455. 0, 0, 0, // unused
  1456. };
  1457. const int ret = llama_decode(ctx, batch_view);
  1458. if (ret != 0)
  1459. {
  1460. if (n_batch == 1 || ret < 0)
  1461. {
  1462. // if you get here, it means the KV cache is full - try increasing it via the context size
  1463. LOG_TEE("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
  1464. return false;
  1465. }
  1466. LOG_TEE("%s : failed to find free space in the KV cache, retrying with smaller n_batch = %d\n", __func__, n_batch / 2);
  1467. // retry with half the batch size to try to find a free slot in the KV cache
  1468. n_batch /= 2;
  1469. i -= n_batch;
  1470. continue;
  1471. }
  1472. for (auto & slot : slots)
  1473. {
  1474. if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens))
  1475. {
  1476. continue;
  1477. }
  1478. // prompt evaluated for embedding
  1479. if (slot.embedding)
  1480. {
  1481. send_embedding(slot);
  1482. slot.release();
  1483. slot.i_batch = -1;
  1484. return true;
  1485. }
  1486. completion_token_output result;
  1487. const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i);
  1488. llama_sampling_accept(slot.ctx_sampling, ctx, id, true);
  1489. slot.n_decoded += 1;
  1490. if (slot.n_decoded == 1)
  1491. {
  1492. slot.t_start_genereration = ggml_time_us();
  1493. slot.t_prompt_processing = (slot.t_start_genereration - slot.t_start_process_prompt) / 1e3;
  1494. }
  1495. llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
  1496. result.tok = id;
  1497. const int32_t n_probs = slot.sparams.n_probs;
  1498. if (slot.sparams.temp <= 0 && n_probs > 0)
  1499. {
  1500. // for llama_sample_token_greedy we need to sort candidates
  1501. llama_sample_softmax(ctx, &cur_p);
  1502. }
  1503. for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
  1504. {
  1505. result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
  1506. }
  1507. if (!process_token(result, slot))
  1508. {
  1509. slot.release();
  1510. slot.print_timings();
  1511. send_final_response(slot);
  1512. }
  1513. slot.i_batch = -1;
  1514. }
  1515. }
  1516. return true;
  1517. }
  1518. void run_on_all_tasks_finished() {
  1519. update_slots();
  1520. }
  1521. };
  1522. static void server_print_usage(const char *argv0, const gpt_params &params,
  1523. const server_params &sparams)
  1524. {
  1525. printf("usage: %s [options]\n", argv0);
  1526. printf("\n");
  1527. printf("options:\n");
  1528. printf(" -h, --help show this help message and exit\n");
  1529. printf(" -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
  1530. printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
  1531. printf(" -tb N, --threads-batch N number of threads to use during batch and prompt processing (default: same as --threads)\n");
  1532. printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx);
  1533. printf(" --rope-scaling {none,linear,yarn}\n");
  1534. printf(" RoPE frequency scaling method, defaults to linear unless specified by the model\n");
  1535. printf(" --rope-freq-base N RoPE base frequency (default: loaded from model)\n");
  1536. printf(" --rope-freq-scale N RoPE frequency scaling factor, expands context by a factor of 1/N\n");
  1537. printf(" --yarn-ext-factor N YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n");
  1538. printf(" --yarn-attn-factor N YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n");
  1539. printf(" --yarn-beta-slow N YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow);
  1540. printf(" --yarn-beta-fast N YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast);
  1541. printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
  1542. printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n");
  1543. printf(" not recommended: doubles context memory required and no measurable increase in quality\n");
  1544. if (llama_mlock_supported())
  1545. {
  1546. printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n");
  1547. }
  1548. if (llama_mmap_supported())
  1549. {
  1550. printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
  1551. }
  1552. printf(" --numa attempt optimizations that help on some NUMA systems\n");
  1553. #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
  1554. printf(" -ngl N, --n-gpu-layers N\n");
  1555. printf(" number of layers to store in VRAM\n");
  1556. printf(" -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
  1557. printf(" how to split the model across multiple GPUs, one of:\n");
  1558. printf(" - none: use one GPU only\n");
  1559. printf(" - layer (default): split layers and KV across GPUs\n");
  1560. printf(" - row: split rows across GPUs\n");
  1561. printf(" -ts SPLIT --tensor-split SPLIT\n");
  1562. printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
  1563. printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n");
  1564. printf(" or for intermediate results and KV (with split-mode = row)\n");
  1565. #endif
  1566. printf(" -m FNAME, --model FNAME\n");
  1567. printf(" model path (default: %s)\n", params.model.c_str());
  1568. printf(" -a ALIAS, --alias ALIAS\n");
  1569. printf(" set an alias for the model, will be added as `model` field in completion response\n");
  1570. printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
  1571. printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
  1572. printf(" --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
  1573. printf(" --port PORT port to listen (default (default: %d)\n", sparams.port);
  1574. printf(" --path PUBLIC_PATH path from which to serve static files (default %s)\n", sparams.public_path.c_str());
  1575. printf(" --api-key API_KEY optional api key to enhance server security. If set, requests must include this key for access.\n");
  1576. printf(" --api-key-file FNAME path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
  1577. printf(" -to N, --timeout N server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
  1578. printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
  1579. printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
  1580. printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
  1581. printf(" -spf FNAME, --system-prompt-file FNAME\n");
  1582. printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
  1583. printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA.\n");
  1584. printf(" --log-disable disables logging to a file.\n");
  1585. printf("\n");
  1586. printf(" --override-kv KEY=TYPE:VALUE\n");
  1587. printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
  1588. printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
  1589. printf(" -gan N, --grp-attn-n N set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`");
  1590. printf(" -gaw N, --grp-attn-w N set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`");
  1591. printf("\n");
  1592. }
  1593. static void server_params_parse(int argc, char **argv, server_params &sparams,
  1594. gpt_params &params, llama_server_context& llama)
  1595. {
  1596. gpt_params default_params;
  1597. server_params default_sparams;
  1598. std::string arg;
  1599. bool invalid_param = false;
  1600. for (int i = 1; i < argc; i++)
  1601. {
  1602. arg = argv[i];
  1603. if (arg == "--port")
  1604. {
  1605. if (++i >= argc)
  1606. {
  1607. invalid_param = true;
  1608. break;
  1609. }
  1610. sparams.port = std::stoi(argv[i]);
  1611. }
  1612. else if (arg == "--host")
  1613. {
  1614. if (++i >= argc)
  1615. {
  1616. invalid_param = true;
  1617. break;
  1618. }
  1619. sparams.hostname = argv[i];
  1620. }
  1621. else if (arg == "--path")
  1622. {
  1623. if (++i >= argc)
  1624. {
  1625. invalid_param = true;
  1626. break;
  1627. }
  1628. sparams.public_path = argv[i];
  1629. }
  1630. else if (arg == "--api-key")
  1631. {
  1632. if (++i >= argc)
  1633. {
  1634. invalid_param = true;
  1635. break;
  1636. }
  1637. sparams.api_keys.push_back(argv[i]);
  1638. }
  1639. else if (arg == "--api-key-file")
  1640. {
  1641. if (++i >= argc)
  1642. {
  1643. invalid_param = true;
  1644. break;
  1645. }
  1646. std::ifstream key_file(argv[i]);
  1647. if (!key_file) {
  1648. fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
  1649. invalid_param = true;
  1650. break;
  1651. }
  1652. std::string key;
  1653. while (std::getline(key_file, key)) {
  1654. if (key.size() > 0) {
  1655. sparams.api_keys.push_back(key);
  1656. }
  1657. }
  1658. key_file.close();
  1659. }
  1660. else if (arg == "--timeout" || arg == "-to")
  1661. {
  1662. if (++i >= argc)
  1663. {
  1664. invalid_param = true;
  1665. break;
  1666. }
  1667. sparams.read_timeout = std::stoi(argv[i]);
  1668. sparams.write_timeout = std::stoi(argv[i]);
  1669. }
  1670. else if (arg == "-m" || arg == "--model")
  1671. {
  1672. if (++i >= argc)
  1673. {
  1674. invalid_param = true;
  1675. break;
  1676. }
  1677. params.model = argv[i];
  1678. }
  1679. else if (arg == "-a" || arg == "--alias")
  1680. {
  1681. if (++i >= argc)
  1682. {
  1683. invalid_param = true;
  1684. break;
  1685. }
  1686. params.model_alias = argv[i];
  1687. }
  1688. else if (arg == "-h" || arg == "--help")
  1689. {
  1690. server_print_usage(argv[0], default_params, default_sparams);
  1691. exit(0);
  1692. }
  1693. else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
  1694. {
  1695. if (++i >= argc)
  1696. {
  1697. invalid_param = true;
  1698. break;
  1699. }
  1700. params.n_ctx = std::stoi(argv[i]);
  1701. }
  1702. else if (arg == "--rope-scaling")
  1703. {
  1704. if (++i >= argc)
  1705. {
  1706. invalid_param = true;
  1707. break;
  1708. }
  1709. std::string value(argv[i]);
  1710. /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_NONE; }
  1711. else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_LINEAR; }
  1712. else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_YARN; }
  1713. else { invalid_param = true; break; }
  1714. }
  1715. else if (arg == "--rope-freq-base")
  1716. {
  1717. if (++i >= argc)
  1718. {
  1719. invalid_param = true;
  1720. break;
  1721. }
  1722. params.rope_freq_base = std::stof(argv[i]);
  1723. }
  1724. else if (arg == "--rope-freq-scale")
  1725. {
  1726. if (++i >= argc)
  1727. {
  1728. invalid_param = true;
  1729. break;
  1730. }
  1731. params.rope_freq_scale = std::stof(argv[i]);
  1732. }
  1733. else if (arg == "--yarn-ext-factor")
  1734. {
  1735. if (++i >= argc) {
  1736. invalid_param = true;
  1737. break;
  1738. }
  1739. params.yarn_ext_factor = std::stof(argv[i]);
  1740. }
  1741. else if (arg == "--yarn-attn-factor")
  1742. {
  1743. if (++i >= argc) {
  1744. invalid_param = true;
  1745. break;
  1746. }
  1747. params.yarn_attn_factor = std::stof(argv[i]);
  1748. }
  1749. else if (arg == "--yarn-beta-fast")
  1750. {
  1751. if (++i >= argc) {
  1752. invalid_param = true;
  1753. break;
  1754. }
  1755. params.yarn_beta_fast = std::stof(argv[i]);
  1756. }
  1757. else if (arg == "--yarn-beta-slow")
  1758. {
  1759. if (++i >= argc) {
  1760. invalid_param = true;
  1761. break;
  1762. }
  1763. params.yarn_beta_slow = std::stof(argv[i]);
  1764. }
  1765. else if (arg == "--threads" || arg == "-t")
  1766. {
  1767. if (++i >= argc)
  1768. {
  1769. invalid_param = true;
  1770. break;
  1771. }
  1772. params.n_threads = std::stoi(argv[i]);
  1773. }
  1774. else if (arg == "--grp-attn-n" || arg == "-gan")
  1775. {
  1776. if (++i >= argc) {
  1777. invalid_param = true;
  1778. break;
  1779. }
  1780. params.grp_attn_n = std::stoi(argv[i]);
  1781. }
  1782. else if (arg == "--grp-attn-w" || arg == "-gaw")
  1783. {
  1784. if (++i >= argc)
  1785. {
  1786. invalid_param = true;
  1787. break;
  1788. }
  1789. params.grp_attn_w = std::stoi(argv[i]);
  1790. }
  1791. else if (arg == "--threads-batch" || arg == "-tb")
  1792. {
  1793. if (++i >= argc)
  1794. {
  1795. invalid_param = true;
  1796. break;
  1797. }
  1798. params.n_threads_batch = std::stoi(argv[i]);
  1799. }
  1800. else if (arg == "-b" || arg == "--batch-size")
  1801. {
  1802. if (++i >= argc)
  1803. {
  1804. invalid_param = true;
  1805. break;
  1806. }
  1807. params.n_batch = std::stoi(argv[i]);
  1808. params.n_batch = std::min(512, params.n_batch);
  1809. }
  1810. else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
  1811. {
  1812. if (++i >= argc)
  1813. {
  1814. invalid_param = true;
  1815. break;
  1816. }
  1817. #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
  1818. params.n_gpu_layers = std::stoi(argv[i]);
  1819. #else
  1820. LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
  1821. "See main README.md for information on enabling GPU BLAS support",
  1822. {{"n_gpu_layers", params.n_gpu_layers}});
  1823. #endif
  1824. }
  1825. else if (arg == "--split-mode" || arg == "-sm")
  1826. {
  1827. if (++i >= argc) {
  1828. invalid_param = true;
  1829. break;
  1830. }
  1831. std::string arg_next = argv[i];
  1832. if (arg_next == "none")
  1833. {
  1834. params.split_mode = LLAMA_SPLIT_NONE;
  1835. }
  1836. else if (arg_next == "layer")
  1837. {
  1838. params.split_mode = LLAMA_SPLIT_LAYER;
  1839. }
  1840. else if (arg_next == "row")
  1841. {
  1842. params.split_mode = LLAMA_SPLIT_ROW;
  1843. }
  1844. else {
  1845. invalid_param = true;
  1846. break;
  1847. }
  1848. #ifndef GGML_USE_CUBLAS
  1849. fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
  1850. #endif // GGML_USE_CUBLAS
  1851. }
  1852. else if (arg == "--tensor-split" || arg == "-ts")
  1853. {
  1854. if (++i >= argc)
  1855. {
  1856. invalid_param = true;
  1857. break;
  1858. }
  1859. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
  1860. std::string arg_next = argv[i];
  1861. // split string by , and /
  1862. const std::regex regex{R"([,/]+)"};
  1863. std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
  1864. std::vector<std::string> split_arg{it, {}};
  1865. GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);
  1866. for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device)
  1867. {
  1868. if (i_device < split_arg.size())
  1869. {
  1870. params.tensor_split[i_device] = std::stof(split_arg[i_device]);
  1871. }
  1872. else
  1873. {
  1874. params.tensor_split[i_device] = 0.0f;
  1875. }
  1876. }
  1877. #else
  1878. LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
  1879. #endif // GGML_USE_CUBLAS
  1880. }
  1881. else if (arg == "--no-mul-mat-q" || arg == "-nommq")
  1882. {
  1883. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
  1884. params.mul_mat_q = false;
  1885. #else
  1886. LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n", {});
  1887. #endif // GGML_USE_CUBLAS
  1888. }
  1889. else if (arg == "--main-gpu" || arg == "-mg")
  1890. {
  1891. if (++i >= argc)
  1892. {
  1893. invalid_param = true;
  1894. break;
  1895. }
  1896. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
  1897. params.main_gpu = std::stoi(argv[i]);
  1898. #else
  1899. LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
  1900. #endif
  1901. }
  1902. else if (arg == "--lora")
  1903. {
  1904. if (++i >= argc)
  1905. {
  1906. invalid_param = true;
  1907. break;
  1908. }
  1909. params.lora_adapter.push_back(std::make_tuple(argv[i], 1.0f));
  1910. params.use_mmap = false;
  1911. }
  1912. else if (arg == "--lora-scaled")
  1913. {
  1914. if (++i >= argc)
  1915. {
  1916. invalid_param = true;
  1917. break;
  1918. }
  1919. const char * lora_adapter = argv[i];
  1920. if (++i >= argc)
  1921. {
  1922. invalid_param = true;
  1923. break;
  1924. }
  1925. params.lora_adapter.push_back(std::make_tuple(lora_adapter, std::stof(argv[i])));
  1926. params.use_mmap = false;
  1927. }
  1928. else if (arg == "--lora-base")
  1929. {
  1930. if (++i >= argc)
  1931. {
  1932. invalid_param = true;
  1933. break;
  1934. }
  1935. params.lora_base = argv[i];
  1936. }
  1937. else if (arg == "-v" || arg == "--verbose")
  1938. {
  1939. #if SERVER_VERBOSE != 1
  1940. LOG_WARNING("server.cpp is not built with verbose logging.", {});
  1941. #else
  1942. server_verbose = true;
  1943. #endif
  1944. }
  1945. else if (arg == "--mlock")
  1946. {
  1947. params.use_mlock = true;
  1948. }
  1949. else if (arg == "--no-mmap")
  1950. {
  1951. params.use_mmap = false;
  1952. }
  1953. else if (arg == "--numa")
  1954. {
  1955. params.numa = true;
  1956. }
  1957. else if (arg == "--embedding")
  1958. {
  1959. params.embedding = true;
  1960. }
  1961. else if (arg == "-cb" || arg == "--cont-batching")
  1962. {
  1963. params.cont_batching = true;
  1964. }
  1965. else if (arg == "-np" || arg == "--parallel")
  1966. {
  1967. if (++i >= argc)
  1968. {
  1969. invalid_param = true;
  1970. break;
  1971. }
  1972. params.n_parallel = std::stoi(argv[i]);
  1973. } else if (arg == "-n" || arg == "--n-predict")
  1974. {
  1975. if (++i >= argc)
  1976. {
  1977. invalid_param = true;
  1978. break;
  1979. }
  1980. params.n_predict = std::stoi(argv[i]);
  1981. } else if (arg == "-spf" || arg == "--system-prompt-file")
  1982. {
  1983. if (++i >= argc)
  1984. {
  1985. invalid_param = true;
  1986. break;
  1987. }
  1988. std::ifstream file(argv[i]);
  1989. if (!file) {
  1990. fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
  1991. invalid_param = true;
  1992. break;
  1993. }
1994. std::string system_content;
1995. std::copy(
1996. std::istreambuf_iterator<char>(file),
1997. std::istreambuf_iterator<char>(),
1998. std::back_inserter(system_content)
1999. );
2000. llama.process_system_prompt_data(json::parse(system_content));
  2001. }
  2002. else if(arg == "--mmproj")
  2003. {
  2004. if (++i >= argc)
  2005. {
  2006. invalid_param = true;
  2007. break;
  2008. }
  2009. params.mmproj = argv[i];
  2010. }
  2011. else if (arg == "--log-disable")
  2012. {
  2013. log_set_target(stdout);
  2014. LOG_INFO("logging to file is disabled.", {});
  2015. }
  2016. else if (arg == "--override-kv")
  2017. {
  2018. if (++i >= argc) {
  2019. invalid_param = true;
  2020. break;
  2021. }
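// expected format is KEY=TYPE:VALUE, e.g. tokenizer.ggml.add_bos_token=bool:false (see the usage text above)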
  2022. char * sep = strchr(argv[i], '=');
  2023. if (sep == nullptr || sep - argv[i] >= 128) {
  2024. fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
  2025. invalid_param = true;
  2026. break;
  2027. }
  2028. struct llama_model_kv_override kvo;
  2029. std::strncpy(kvo.key, argv[i], sep - argv[i]);
  2030. kvo.key[sep - argv[i]] = 0;
  2031. sep++;
  2032. if (strncmp(sep, "int:", 4) == 0) {
  2033. sep += 4;
  2034. kvo.tag = LLAMA_KV_OVERRIDE_INT;
  2035. kvo.int_value = std::atol(sep);
  2036. } else if (strncmp(sep, "float:", 6) == 0) {
  2037. sep += 6;
  2038. kvo.tag = LLAMA_KV_OVERRIDE_FLOAT;
  2039. kvo.float_value = std::atof(sep);
  2040. } else if (strncmp(sep, "bool:", 5) == 0) {
  2041. sep += 5;
  2042. kvo.tag = LLAMA_KV_OVERRIDE_BOOL;
  2043. if (std::strcmp(sep, "true") == 0) {
  2044. kvo.bool_value = true;
  2045. } else if (std::strcmp(sep, "false") == 0) {
  2046. kvo.bool_value = false;
  2047. } else {
  2048. fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
  2049. invalid_param = true;
  2050. break;
  2051. }
  2052. } else {
  2053. fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
  2054. invalid_param = true;
  2055. break;
  2056. }
  2057. params.kv_overrides.push_back(kvo);
  2058. }
  2059. else
  2060. {
  2061. fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
  2062. server_print_usage(argv[0], default_params, default_sparams);
  2063. exit(1);
  2064. }
  2065. }
  2066. if (!params.kv_overrides.empty()) {
  2067. params.kv_overrides.emplace_back(llama_model_kv_override());
  2068. params.kv_overrides.back().key[0] = 0;
  2069. }
  2070. if (invalid_param)
  2071. {
  2072. fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
  2073. server_print_usage(argv[0], default_params, default_sparams);
  2074. exit(1);
  2075. }
  2076. }
  2077. /* llama.cpp completion api semantics */
  2078. static json format_partial_response(
  2079. llama_server_context &llama, llama_client_slot *slot, const std::string &content, const std::vector<completion_token_output> &probs
  2080. ) {
  2081. json res = json
  2082. {
  2083. {"content", content },
  2084. {"stop", false},
  2085. {"slot_id", slot->id },
  2086. {"multimodal", llama.multimodal }
  2087. };
  2088. if (slot->sparams.n_probs > 0)
  2089. {
  2090. res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
  2091. }
  2092. return res;
  2093. }
  2094. static json format_tokenizer_response(const std::vector<llama_token> &tokens)
  2095. {
  2096. return json{
  2097. {"tokens", tokens}};
  2098. }
  2099. static json format_detokenized_response(std::string content)
  2100. {
  2101. return json{
  2102. {"content", content}};
  2103. }
  2104. static void log_server_request(const httplib::Request &req, const httplib::Response &res)
  2105. {
  2106. LOG_INFO("request", {
  2107. {"remote_addr", req.remote_addr},
  2108. {"remote_port", req.remote_port},
  2109. {"status", res.status},
  2110. {"method", req.method},
  2111. {"path", req.path},
  2112. {"params", req.params},
  2113. });
  2114. LOG_VERBOSE("request", {
  2115. {"request", req.body},
  2116. {"response", res.body},
  2117. });
  2118. }
  2119. struct token_translator
  2120. {
  2121. llama_context * ctx;
  2122. std::string operator()(llama_token tok) const { return llama_token_to_piece(ctx, tok); }
  2123. std::string operator()(const completion_token_output &cto) const { return (*this)(cto.tok); }
  2124. };
  2125. static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama, llama_client_slot *slot)
  2126. {
  2127. auto & gtps = slot->generated_token_probs;
  2128. auto translator = token_translator{llama.ctx};
  2129. auto add_strlen = [=](size_t sum, const completion_token_output & cto) { return sum + translator(cto).size(); };
  2130. const size_t len = std::accumulate(gtps.begin(), gtps.end(), size_t(0), add_strlen);
  2131. if (slot->generated_text.capacity() < slot->generated_text.size() + len)
  2132. {
  2133. slot->generated_text.reserve(slot->generated_text.size() + len);
  2134. }
  2135. for (const completion_token_output & cto : gtps)
  2136. {
  2137. slot->generated_text += translator(cto);
  2138. }
  2139. }
  2140. int main(int argc, char **argv)
  2141. {
  2142. #if SERVER_VERBOSE != 1
  2143. log_disable();
  2144. #endif
  2145. // own arguments required by this example
  2146. gpt_params params;
  2147. server_params sparams;
  2148. // struct that contains llama context and inference
  2149. llama_server_context llama;
  2150. server_params_parse(argc, argv, sparams, params, llama);
  2151. if (params.model_alias == "unknown")
  2152. {
  2153. params.model_alias = params.model;
  2154. }
  2155. llama_backend_init(params.numa);
  2156. LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER},
  2157. {"commit", LLAMA_COMMIT}});
  2158. LOG_INFO("system info", {
  2159. {"n_threads", params.n_threads},
  2160. {"n_threads_batch", params.n_threads_batch},
  2161. {"total_threads", std::thread::hardware_concurrency()},
  2162. {"system_info", llama_print_system_info()},
  2163. });
  2164. httplib::Server svr;
  2165. std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};
  2166. svr.set_default_headers({{"Server", "llama.cpp"}});
  2167. // CORS preflight
  2168. svr.Options(R"(.*)", [](const httplib::Request &req, httplib::Response &res) {
  2169. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2170. res.set_header("Access-Control-Allow-Credentials", "true");
  2171. res.set_header("Access-Control-Allow-Methods", "POST");
  2172. res.set_header("Access-Control-Allow-Headers", "*");
  2173. });
  2174. svr.Get("/health", [&](const httplib::Request&, httplib::Response& res) {
  2175. server_state current_state = state.load();
  2176. switch(current_state) {
  2177. case SERVER_STATE_READY:
  2178. res.set_content(R"({"status": "ok"})", "application/json");
  2179. res.status = 200; // HTTP OK
  2180. break;
  2181. case SERVER_STATE_LOADING_MODEL:
  2182. res.set_content(R"({"status": "loading model"})", "application/json");
  2183. res.status = 503; // HTTP Service Unavailable
  2184. break;
  2185. case SERVER_STATE_ERROR:
  2186. res.set_content(R"({"status": "error", "error": "Model failed to load"})", "application/json");
  2187. res.status = 500; // HTTP Internal Server Error
  2188. break;
  2189. }
  2190. });
  2191. svr.set_logger(log_server_request);
  2192. svr.set_exception_handler([](const httplib::Request &, httplib::Response &res, std::exception_ptr ep)
  2193. {
  2194. const char fmt[] = "500 Internal Server Error\n%s";
  2195. char buf[BUFSIZ];
  2196. try
  2197. {
  2198. std::rethrow_exception(std::move(ep));
  2199. }
  2200. catch (std::exception &e)
  2201. {
  2202. snprintf(buf, sizeof(buf), fmt, e.what());
  2203. }
  2204. catch (...)
  2205. {
  2206. snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
  2207. }
  2208. res.set_content(buf, "text/plain; charset=utf-8");
  2209. res.status = 500;
  2210. });
  2211. svr.set_error_handler([](const httplib::Request &, httplib::Response &res)
  2212. {
  2213. if (res.status == 401)
  2214. {
  2215. res.set_content("Unauthorized", "text/plain; charset=utf-8");
  2216. }
  2217. if (res.status == 400)
  2218. {
  2219. res.set_content("Invalid request", "text/plain; charset=utf-8");
  2220. }
  2221. else if (res.status == 404)
  2222. {
  2223. res.set_content("File Not Found", "text/plain; charset=utf-8");
  2224. res.status = 404;
  2225. }
  2226. });
  2227. // set timeouts and change hostname and port
  2228. svr.set_read_timeout (sparams.read_timeout);
  2229. svr.set_write_timeout(sparams.write_timeout);
  2230. if (!svr.bind_to_port(sparams.hostname, sparams.port))
  2231. {
  2232. fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
  2233. return 1;
  2234. }
  2235. // Set the base directory for serving static files
  2236. svr.set_base_dir(sparams.public_path);
  2237. // to make it ctrl+clickable:
  2238. LOG_TEE("\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);
  2239. std::unordered_map<std::string, std::string> log_data;
  2240. log_data["hostname"] = sparams.hostname;
  2241. log_data["port"] = std::to_string(sparams.port);
  2242. if (sparams.api_keys.size() == 1) {
  2243. log_data["api_key"] = "api_key: ****" + sparams.api_keys[0].substr(sparams.api_keys[0].length() - 4);
  2244. } else if (sparams.api_keys.size() > 1) {
  2245. log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
  2246. }
  2247. LOG_INFO("HTTP server listening", log_data);
  2248. // run the HTTP server in a thread - see comment below
  2249. std::thread t([&]()
  2250. {
  2251. if (!svr.listen_after_bind())
  2252. {
  2253. state.store(SERVER_STATE_ERROR);
  2254. return 1;
  2255. }
  2256. return 0;
  2257. });
  2258. // load the model
  2259. if (!llama.load_model(params))
  2260. {
  2261. state.store(SERVER_STATE_ERROR);
  2262. return 1;
  2263. } else {
  2264. llama.initialize();
  2265. state.store(SERVER_STATE_READY);
  2266. LOG_INFO("model loaded", {});
  2267. }
  2268. // Middleware for API key validation
  2269. auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
  2270. // If API key is not set, skip validation
  2271. if (sparams.api_keys.empty()) {
  2272. return true;
  2273. }
  2274. // Check for API key in the header
  2275. auto auth_header = req.get_header_value("Authorization");
  2276. std::string prefix = "Bearer ";
  2277. if (auth_header.substr(0, prefix.size()) == prefix) {
  2278. std::string received_api_key = auth_header.substr(prefix.size());
  2279. if (std::find(sparams.api_keys.begin(), sparams.api_keys.end(), received_api_key) != sparams.api_keys.end()) {
  2280. return true; // API key is valid
  2281. }
  2282. }
  2283. // API key is invalid or not provided
  2284. res.set_content("Unauthorized: Invalid API Key", "text/plain; charset=utf-8");
  2285. res.status = 401; // Unauthorized
  2286. LOG_WARNING("Unauthorized: Invalid API Key", {});
  2287. return false;
  2288. };
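// clients authenticate by sending the header "Authorization: Bearer <api key>"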
  2289. // this is only called if no index.html is found in the public --path
  2290. svr.Get("/", [](const httplib::Request &, httplib::Response &res)
  2291. {
  2292. res.set_content(reinterpret_cast<const char*>(&index_html), index_html_len, "text/html; charset=utf-8");
  2293. return false;
  2294. });
  2295. // this is only called if no index.js is found in the public --path
  2296. svr.Get("/index.js", [](const httplib::Request &, httplib::Response &res)
  2297. {
  2298. res.set_content(reinterpret_cast<const char *>(&index_js), index_js_len, "text/javascript; charset=utf-8");
  2299. return false;
  2300. });
2301. // this is only called if no completion.js is found in the public --path
  2302. svr.Get("/completion.js", [](const httplib::Request &, httplib::Response &res)
  2303. {
  2304. res.set_content(reinterpret_cast<const char*>(&completion_js), completion_js_len, "application/javascript; charset=utf-8");
  2305. return false;
  2306. });
2307. // this is only called if no json-schema-to-grammar.mjs is found in the public --path
  2308. svr.Get("/json-schema-to-grammar.mjs", [](const httplib::Request &, httplib::Response &res)
  2309. {
  2310. res.set_content(reinterpret_cast<const char*>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript; charset=utf-8");
  2311. return false;
  2312. });
  2313. svr.Get("/props", [&llama](const httplib::Request & req, httplib::Response &res)
  2314. {
  2315. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2316. json data = {
  2317. { "user_name", llama.name_user.c_str() },
  2318. { "assistant_name", llama.name_assistant.c_str() }
  2319. };
  2320. res.set_content(data.dump(), "application/json; charset=utf-8");
  2321. });
  2322. svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
  2323. {
  2324. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2325. if (!validate_api_key(req, res)) {
  2326. return;
  2327. }
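// parse the completion request body; the "stream" field selects between a single JSON response
// and the SSE streaming path below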
  2328. json data = json::parse(req.body);
  2329. const int task_id = llama.queue_tasks.get_new_id();
  2330. llama.queue_results.add_waiting_task_id(task_id);
  2331. llama.request_completion(task_id, data, false, false, -1);
  2332. if (!json_value(data, "stream", false)) {
  2333. std::string completion_text;
  2334. task_result result = llama.queue_results.recv(task_id);
  2335. if (!result.error && result.stop) {
  2336. res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
  2337. }
  2338. else
  2339. {
  2340. res.status = 404;
  2341. res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
  2342. }
  2343. llama.queue_results.remove_waiting_task_id(task_id);
  2344. } else {
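// streaming path: each partial result is written as a server-sent event ("data: {...}\n\n");
// an error result is written as "error: {...}\n\n" and terminates the stream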
  2345. const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink)
  2346. {
  2347. while (true)
  2348. {
  2349. task_result result = llama.queue_results.recv(task_id);
  2350. if (!result.error) {
  2351. const std::string str =
  2352. "data: " +
  2353. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2354. "\n\n";
  2355. LOG_VERBOSE("data stream", {
  2356. { "to_send", str }
  2357. });
  2358. if (!sink.write(str.c_str(), str.size()))
  2359. {
  2360. llama.queue_results.remove_waiting_task_id(task_id);
  2361. return false;
  2362. }
  2363. if (result.stop) {
  2364. break;
  2365. }
  2366. } else {
  2367. const std::string str =
  2368. "error: " +
  2369. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2370. "\n\n";
  2371. LOG_VERBOSE("data stream", {
  2372. { "to_send", str }
  2373. });
  2374. if (!sink.write(str.c_str(), str.size()))
  2375. {
  2376. llama.queue_results.remove_waiting_task_id(task_id);
  2377. return false;
  2378. }
  2379. break;
  2380. }
  2381. }
  2382. llama.queue_results.remove_waiting_task_id(task_id);
  2383. sink.done();
  2384. return true;
  2385. };
  2386. auto on_complete = [task_id, &llama] (bool)
  2387. {
  2388. // cancel
  2389. llama.request_cancel(task_id);
  2390. llama.queue_results.remove_waiting_task_id(task_id);
  2391. };
  2392. res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
  2393. }
  2394. });
  2395. svr.Get("/v1/models", [&params](const httplib::Request& req, httplib::Response& res)
  2396. {
  2397. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2398. std::time_t t = std::time(0);
  2399. json models = {
  2400. {"object", "list"},
  2401. {"data", {
  2402. {
  2403. {"id", params.model_alias},
  2404. {"object", "model"},
  2405. {"created", t},
  2406. {"owned_by", "llamacpp"}
  2407. },
  2408. }}
  2409. };
  2410. res.set_content(models.dump(), "application/json; charset=utf-8");
  2411. });
  2412. // TODO: add mount point without "/v1" prefix -- how?
  2413. svr.Post("/v1/chat/completions", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
  2414. {
  2415. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2416. if (!validate_api_key(req, res)) {
  2417. return;
  2418. }
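// the request body is OpenAI-style chat JSON; oaicompat_completion_params_parse() maps it onto
// the internal completion parameters before the task is queued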
  2419. json data = oaicompat_completion_params_parse(json::parse(req.body));
  2420. const int task_id = llama.queue_tasks.get_new_id();
  2421. llama.queue_results.add_waiting_task_id(task_id);
  2422. llama.request_completion(task_id, data, false, false, -1);
  2423. if (!json_value(data, "stream", false)) {
  2424. std::string completion_text;
  2425. task_result result = llama.queue_results.recv(task_id);
  2426. if (!result.error && result.stop) {
  2427. json oaicompat_result = format_final_response_oaicompat(data, result);
  2428. res.set_content(oaicompat_result.dump(-1, ' ', false,
  2429. json::error_handler_t::replace),
  2430. "application/json; charset=utf-8");
  2431. } else {
  2432. res.status = 500;
  2433. res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
  2434. }
  2435. llama.queue_results.remove_waiting_task_id(task_id);
  2436. } else {
  2437. const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink &sink) {
  2438. while (true) {
  2439. task_result llama_result = llama.queue_results.recv(task_id);
  2440. if (!llama_result.error) {
  2441. std::vector<json> result_array = format_partial_response_oaicompat( llama_result);
  2442. for (auto it = result_array.begin(); it != result_array.end(); ++it)
  2443. {
  2444. if (!it->empty()) {
  2445. const std::string str =
  2446. "data: " +
  2447. it->dump(-1, ' ', false, json::error_handler_t::replace) +
  2448. "\n\n";
  2449. LOG_VERBOSE("data stream", {{"to_send", str}});
  2450. if (!sink.write(str.c_str(), str.size())) {
  2451. llama.queue_results.remove_waiting_task_id(task_id);
  2452. return false;
  2453. }
  2454. }
  2455. }
  2456. if (llama_result.stop) {
  2457. break;
  2458. }
  2459. } else {
  2460. const std::string str =
  2461. "error: " +
  2462. llama_result.result_json.dump(-1, ' ', false,
  2463. json::error_handler_t::replace) +
  2464. "\n\n";
  2465. LOG_VERBOSE("data stream", {{"to_send", str}});
  2466. if (!sink.write(str.c_str(), str.size())) {
  2467. llama.queue_results.remove_waiting_task_id(task_id);
  2468. return false;
  2469. }
  2470. break;
  2471. }
  2472. }
  2473. sink.done();
  2474. llama.queue_results.remove_waiting_task_id(task_id);
  2475. return true;
  2476. };
  2477. auto on_complete = [task_id, &llama](bool) {
  2478. // cancel request
  2479. llama.request_cancel(task_id);
  2480. llama.queue_results.remove_waiting_task_id(task_id);
  2481. };
  2482. res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
  2483. }
  2484. });
  2485. svr.Post("/infill", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
  2486. {
  2487. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2488. if (!validate_api_key(req, res)) {
  2489. return;
  2490. }
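// infill requests are queued with the infill flag set (third argument of request_completion below),
// so update_slots() assembles the prefix/suffix prompt for them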
  2491. json data = json::parse(req.body);
  2492. const int task_id = llama.queue_tasks.get_new_id();
  2493. llama.queue_results.add_waiting_task_id(task_id);
  2494. llama.request_completion(task_id, data, true, false, -1);
  2495. if (!json_value(data, "stream", false)) {
  2496. std::string completion_text;
  2497. task_result result = llama.queue_results.recv(task_id);
  2498. if (!result.error && result.stop)
  2499. {
  2500. res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
  2501. }
  2502. else
  2503. {
  2504. res.status = 404;
  2505. res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
  2506. }
  2507. llama.queue_results.remove_waiting_task_id(task_id);
  2508. } else {
  2509. const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink) {
  2510. while (true)
  2511. {
  2512. task_result result = llama.queue_results.recv(task_id);
  2513. if (!result.error) {
  2514. const std::string str =
  2515. "data: " +
  2516. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2517. "\n\n";
  2518. LOG_VERBOSE("data stream", {
  2519. { "to_send", str }
  2520. });
  2521. if (!sink.write(str.c_str(), str.size()))
  2522. {
  2523. llama.queue_results.remove_waiting_task_id(task_id);
  2524. return false;
  2525. }
  2526. if (result.stop)
  2527. {
  2528. break;
  2529. }
  2530. }
  2531. else
  2532. {
  2533. break;
  2534. }
  2535. }
  2536. llama.queue_results.remove_waiting_task_id(task_id);
  2537. sink.done();
  2538. return true;
  2539. };
  2540. auto on_complete = [task_id, &llama] (bool)
  2541. {
  2542. // cancel
  2543. llama.request_cancel(task_id);
  2544. };
  2545. res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
  2546. }
  2547. });
  2548. svr.Get("/model.json", [&llama](const httplib::Request &, httplib::Response &res)
  2549. {
  2550. const json data = llama.get_model_props();
  2551. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2552. });
  2553. svr.Options(R"(/.*)", [](const httplib::Request &, httplib::Response &res)
  2554. { return res.set_content("", "application/json; charset=utf-8"); });
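// POST /tokenize: {"content": "..."} -> {"tokens": [...]}; POST /detokenize performs the inverse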
  2555. svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
  2556. {
  2557. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2558. const json body = json::parse(req.body);
  2559. std::vector<llama_token> tokens;
  2560. if (body.count("content") != 0)
  2561. {
  2562. tokens = llama.tokenize(body["content"], false);
  2563. }
  2564. const json data = format_tokenizer_response(tokens);
  2565. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2566. });
  2567. svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
  2568. {
  2569. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2570. const json body = json::parse(req.body);
  2571. std::string content;
  2572. if (body.count("tokens") != 0)
  2573. {
  2574. const std::vector<llama_token> tokens = body["tokens"];
  2575. content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
  2576. }
  2577. const json data = format_detokenized_response(content);
  2578. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2579. });
  2580. svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
  2581. {
  2582. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2583. const json body = json::parse(req.body);
  2584. json prompt;
  2585. if (body.count("content") != 0)
  2586. {
  2587. prompt = body["content"];
  2588. }
  2589. else
  2590. {
  2591. prompt = "";
  2592. }
  2593. json image_data;
  2594. if (body.count("image_data") != 0) {
  2595. image_data = body["image_data"];
  2596. }
  2597. else
  2598. {
  2599. image_data = "";
  2600. }
  2601. // create and queue the task
  2602. const int task_id = llama.queue_tasks.get_new_id();
  2603. llama.queue_results.add_waiting_task_id(task_id);
  2604. llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
  2605. // get the result
  2606. task_result result = llama.queue_results.recv(task_id);
  2607. llama.queue_results.remove_waiting_task_id(task_id);
  2608. // send the result
  2609. return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
  2610. });
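// OpenAI-compatible embeddings endpoint: "input" may be a single string or an array of strings,
// and each array element is embedded as its own task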
  2611. svr.Post("/v1/embeddings", [&llama](const httplib::Request &req, httplib::Response &res)
  2612. {
  2613. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2614. const json body = json::parse(req.body);
  2615. json prompt;
  2616. if (body.count("input") != 0)
  2617. {
  2618. prompt = body["input"];
  2619. // batch
  2620. if(prompt.is_array()) {
  2621. json data = json::array();
  2622. int i = 0;
  2623. for (const json &elem : prompt) {
  2624. const int task_id = llama.queue_tasks.get_new_id();
  2625. llama.queue_results.add_waiting_task_id(task_id);
  2626. llama.request_completion(task_id, { {"prompt", elem}, { "n_predict", 0} }, false, true, -1);
  2627. // get the result
  2628. task_result result = llama.queue_results.recv(task_id);
  2629. llama.queue_results.remove_waiting_task_id(task_id);
  2630. json embedding = json{
  2631. {"embedding", json_value(result.result_json, "embedding", json::array())},
  2632. {"index", i++},
  2633. {"object", "embedding"}
  2634. };
  2635. data.push_back(embedding);
  2636. }
  2637. json result = format_embeddings_response_oaicompat(body, data);
  2638. return res.set_content(result.dump(), "application/json; charset=utf-8");
  2639. }
  2640. }
  2641. else
  2642. {
  2643. prompt = "";
  2644. }
  2645. // create and queue the task
  2646. const int task_id = llama.queue_tasks.get_new_id();
  2647. llama.queue_results.add_waiting_task_id(task_id);
  2648. llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}}, false, true, -1);
  2649. // get the result
  2650. task_result result = llama.queue_results.recv(task_id);
  2651. llama.queue_results.remove_waiting_task_id(task_id);
  2652. json data = json::array({json{
  2653. {"embedding", json_value(result.result_json, "embedding", json::array())},
  2654. {"index", 0},
  2655. {"object", "embedding"}
  2656. }}
  2657. );
  2658. json root = format_embeddings_response_oaicompat(body, data);
  2659. // send the result
  2660. return res.set_content(root.dump(), "application/json; charset=utf-8");
  2661. });
2662. // GG: if I put the main loop inside a thread, it crashes on the first request when built in Debug!?
  2663. // "Bus error: 10" - this is on macOS, it does not crash on Linux
  2664. //std::thread t2([&]()
  2665. /*{
  2666. bool running = true;
  2667. while (running)
  2668. {
  2669. running = llama.update_slots();
  2670. }
  2671. }*/
  2672. //);
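// wire up the task queue: new tasks go to process_single_task(), finished multitasks to
// on_finish_multitask(), and once the queue drains run_on_all_tasks_finished() calls update_slots();
// multitask progress reported on the result queue is forwarded back to the task queue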
  2673. llama.queue_tasks.on_new_task(std::bind(
  2674. &llama_server_context::process_single_task, &llama, std::placeholders::_1));
  2675. llama.queue_tasks.on_finish_multitask(std::bind(
  2676. &llama_server_context::on_finish_multitask, &llama, std::placeholders::_1));
  2677. llama.queue_tasks.on_all_tasks_finished(std::bind(
  2678. &llama_server_context::run_on_all_tasks_finished, &llama));
  2679. llama.queue_results.on_multitask_update(std::bind(
  2680. &llama_server_queue::update_multitask,
  2681. &llama.queue_tasks,
  2682. std::placeholders::_1,
  2683. std::placeholders::_2,
  2684. std::placeholders::_3
  2685. ));
  2686. llama.queue_tasks.start_loop();
  2687. t.join();
  2688. llama_backend_free();
  2689. return 0;
  2690. }