// server-context.cpp
#include "server-context.h"
#include "server-common.h"
#include "server-http.h"
#include "server-task.h"
#include "server-queue.h"

#include "arg.h"
#include "common.h"
#include "llama.h"
#include "log.h"
#include "sampling.h"
#include "speculative.h"
#include "mtmd.h"
#include "mtmd-helper.h"

#include <cstddef>
#include <cinttypes>
#include <memory>
#include <unordered_set>
#include <filesystem>

// fix problem with std::min and std::max
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#    define NOMINMAX
#endif
#include <windows.h>
#endif

using json = nlohmann::ordered_json;

constexpr int HTTP_POLLING_SECONDS = 1;

// state diagram: https://github.com/ggml-org/llama.cpp/pull/9283
enum slot_state {
    SLOT_STATE_IDLE,
    SLOT_STATE_WAIT_OTHER, // after assigning a task, but waiting for parent slot to process prompt
    SLOT_STATE_STARTED,    // after assigning a task and about to process prompt
    SLOT_STATE_PROCESSING_PROMPT,
    SLOT_STATE_DONE_PROMPT,
    SLOT_STATE_GENERATING,
};
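
// typical lifecycle (roughly, per the state diagram linked above; WAIT_OTHER applies
// only to child slots that piggyback on a parent slot's prompt processing):
//   IDLE -> STARTED -> PROCESSING_PROMPT -> DONE_PROMPT -> GENERATING -> IDLE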

enum server_state {
    SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
    SERVER_STATE_READY,         // Server is ready and model is loaded
};

static bool server_task_type_need_embd(server_task_type task_type) {
    switch (task_type) {
        case SERVER_TASK_TYPE_EMBEDDING:
        case SERVER_TASK_TYPE_RERANK:
            return true;
        default:
            return false;
    }
}

static bool server_task_type_need_logits(server_task_type task_type) {
    switch (task_type) {
        case SERVER_TASK_TYPE_COMPLETION:
        case SERVER_TASK_TYPE_INFILL:
            return true;
        default:
            return false;
    }
}

struct server_slot {
    int id;

    llama_batch batch_spec = {};

    // TODO: change to unique_ptrs for consistency:
    llama_context * ctx     = nullptr;
    llama_context * ctx_dft = nullptr;

    // multimodal
    mtmd_context * mctx = nullptr;

    common_speculative * spec = nullptr;

    std::unique_ptr<const server_task> task;
    std::unique_ptr<const server_task> task_prev; // used for debugging

    // used to determine the slot that has been used the longest
    int64_t t_last_used = -1;

    // generation props
    int32_t n_ctx       = 0; // context size per slot
    int32_t n_keep      = 0;
    int32_t n_decoded   = 0;
    int32_t n_remaining = -1;
    int32_t i_batch     = -1;

    int32_t n_prompt_tokens_cache     = 0;
    int32_t n_prompt_tokens_processed = 0;

    size_t last_nl_pos = 0;

    std::string  generated_text;
    llama_tokens generated_tokens;

    // idx of draft tokens in the main batch
    // non-empty if we are going to evaluate draft tokens
    // ref: https://github.com/ggml-org/llama.cpp/pull/17808
    std::vector<int32_t> i_batch_dft;

    std::vector<completion_token_output> generated_token_probs;

    bool has_next_token = true;
    bool has_new_line   = false;
    bool truncated      = false;

    stop_type stop;

    std::string stopping_word;

    // state
    slot_state state = SLOT_STATE_IDLE;

    server_prompt prompt;

    void prompt_save(server_prompt_cache & prompt_cache) const {
        GGML_ASSERT(prompt.data.size() == 0);

        const size_t cur_size = llama_state_seq_get_size_ext(ctx, id, 0);

        SRV_WRN(" - saving prompt with length %d, total state size = %.3f MiB\n",
                (int) prompt.tokens.size(), cur_size / (1024.0 * 1024.0));

        auto * cur = prompt_cache.alloc(prompt, cur_size);
        if (cur == nullptr) {
            return;
        }

        llama_state_seq_get_data_ext(ctx, cur->data.data(), cur_size, id, 0);
    }

    bool prompt_load(server_prompt_cache & prompt_cache, const server_tokens & tokens) {
        bool res = prompt_cache.load(prompt, tokens, ctx, id);
        if (!res) {
            SLT_WRN(*this, "%s", "failed to load prompt from cache\n");
        }

        return res;
    }
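
    // note: prompt_save() serializes this slot's sequence state into the prompt cache via
    //       llama_state_seq_get_data_ext(), and prompt_load() restores a cached state back
    //       into this slot's sequence id - so a later task with a matching prefix can skip
    //       re-processing those tokens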

    std::vector<common_adapter_lora_info> lora;
    int32_t alora_invocation_start = -1;

    // sampling
    json json_schema;

    common_sampler_ptr smpl;

    llama_token  sampled; // in speculative mode, this is the last accepted token
    llama_tokens drafted;

    // stats
    size_t n_sent_text = 0; // number of sent text characters

    int64_t t_start_process_prompt;
    int64_t t_start_generation;

    double t_prompt_processing; // ms
    double t_token_generation;  // ms

    std::function<void(int)> callback_on_release;

    // Speculative decoding stats
    int32_t n_draft_total    = 0; // Total draft tokens generated
    int32_t n_draft_accepted = 0; // Draft tokens actually accepted

    void reset() {
        SLT_DBG(*this, "%s", "\n");

        n_prompt_tokens_cache = 0;

        last_nl_pos    = 0;
        generated_text = "";
        has_new_line   = false;
        truncated      = false;
        stop           = STOP_TYPE_NONE;
        stopping_word  = "";
        n_sent_text    = 0;

        drafted.clear();
        i_batch_dft.clear();

        generated_tokens.clear();
        generated_token_probs.clear();

        json_schema = json();

        // clear speculative decoding stats
        n_draft_total    = 0;
        n_draft_accepted = 0;

        task.reset();
        task_prev.reset();

        // clear alora start
        alora_invocation_start = -1;
    }

    bool need_embd() const {
        GGML_ASSERT(task);

        return server_task_type_need_embd(task->type);
    }

    bool need_logits() const {
        GGML_ASSERT(task);

        return server_task_type_need_logits(task->type);
    }

    // if the context does not have a memory module then all embeddings have to be computed within a single ubatch
    // also we cannot split if the pooling would require any past tokens
    bool can_split() const {
        return
            !need_embd() ||
            (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST);
    }

    bool can_batch_with(server_slot & other_slot) const {
        GGML_ASSERT(task);

        return task->type == other_slot.task->type && are_lora_equal(lora, other_slot.lora);
    }

    bool has_budget(const common_params & global_params) {
        GGML_ASSERT(task);

        if (task->params.n_predict == -1 && global_params.n_predict == -1) {
            return true; // limitless
        }

        n_remaining = -1;

        if (task->params.n_predict != -1) {
            n_remaining = task->params.n_predict - n_decoded;
        } else if (global_params.n_predict != -1) {
            n_remaining = global_params.n_predict - n_decoded;
        }

        return n_remaining > 0; // true while there is budget left
    }

    bool is_processing() const {
        return state != SLOT_STATE_IDLE;
    }

    bool can_speculate() const {
        return ctx_dft;
    }

    void add_token(const completion_token_output & token) {
        if (!is_processing()) {
            SLT_WRN(*this, "%s", "slot is not processing\n");
            return;
        }

        generated_token_probs.push_back(token);
    }

    int get_n_draft_max() const {
        if (!can_speculate()) {
            return 0;
        }

        // determine the max draft that fits the current slot state
        int n_draft_max = task->params.speculative.n_max;

        // note: slot.prompt is not yet expanded with the `id` token sampled above
        // also, need to leave space for 1 extra token to allow context shifts
        n_draft_max = std::min(n_draft_max, n_ctx - prompt.n_tokens() - 2);

        if (n_remaining > 0) {
            n_draft_max = std::min(n_draft_max, n_remaining - 1);
        }

        SLT_DBG(*this, "max possible draft: %d\n", n_draft_max);

        if (n_draft_max < task->params.speculative.n_min) {
            SLT_DBG(*this, "the max possible draft is too small: %d < %d - skipping speculative decoding\n", n_draft_max, task->params.speculative.n_min);

            n_draft_max = 0;
        }

        return n_draft_max;
    }
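
    // illustrative example: with n_ctx = 4096, prompt.n_tokens() = 4000, speculative.n_max = 16
    // and n_remaining = 10, the draft is capped to min(16, 4096 - 4000 - 2, 10 - 1) = 9 tokens;
    // if that fell below speculative.n_min, drafting would be skipped for this step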

    // note: a slot can be either a parent or a child
    bool is_parent() const {
        return is_processing() && task->n_children > 0;
    }

    bool is_child() const {
        return is_processing() && task->id_parent >= 0;
    }

    void release() {
        if (is_processing()) {
            GGML_ASSERT(task);

            SLT_INF(*this, "stop processing: n_tokens = %d, truncated = %d\n", prompt.n_tokens(), truncated);

            t_last_used = ggml_time_us();
            t_token_generation = (ggml_time_us() - t_start_generation) / 1e3;
            state = SLOT_STATE_IDLE;

            task_prev = std::move(task);
            task.reset();

            callback_on_release(id);
        }
    }

    result_timings get_timings() const {
        result_timings timings;
        timings.cache_n = n_prompt_tokens_cache;

        timings.prompt_n            = n_prompt_tokens_processed;
        timings.prompt_ms           = t_prompt_processing;
        timings.prompt_per_token_ms = t_prompt_processing / n_prompt_tokens_processed;
        timings.prompt_per_second   = 1e3 / t_prompt_processing * n_prompt_tokens_processed;

        timings.predicted_n            = n_decoded;
        timings.predicted_ms           = t_token_generation;
        timings.predicted_per_token_ms = t_token_generation / n_decoded;
        timings.predicted_per_second   = 1e3 / t_token_generation * n_decoded;

        // Add speculative metrics
        if (n_draft_total > 0) {
            timings.draft_n          = n_draft_total;
            timings.draft_n_accepted = n_draft_accepted;
        }

        return timings;
    }
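
    // illustrative example: 512 prompt tokens processed in 256 ms gives
    // prompt_per_token_ms = 256 / 512 = 0.5 and prompt_per_second = 1e3 / 256 * 512 = 2000;
    // the same formulas apply to the generation (predicted_*) metrics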

    size_t find_stopping_strings(const std::string & text, const size_t last_token_size, bool is_full_stop) {
        GGML_ASSERT(task);

        size_t stop_pos = std::string::npos;

        for (const std::string & word : task->params.antiprompt) {
            size_t pos;

            if (is_full_stop) {
                const size_t tmp      = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;

                pos = text.find(word, from_pos);
            } else {
                // otherwise, partial stop
                pos = string_find_partial_stop(text, word);
            }

            if (pos != std::string::npos && (stop_pos == std::string::npos || pos < stop_pos)) {
                if (is_full_stop) {
                    stop           = STOP_TYPE_WORD;
                    stopping_word  = word;
                    has_next_token = false;
                }
                stop_pos = pos;
            }
        }

        return stop_pos;
    }
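
    // illustrative example: with antiprompt "###", a full stop matches once the generated text
    // actually contains "###" near its tail, while a partial stop already matches on a trailing
    // "#" or "##" - this lets streaming hold back text that might still turn into a stop word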

    void print_timings() const {
        const double t_prompt        = t_prompt_processing / n_prompt_tokens_processed;
        const double n_prompt_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed;

        const double t_gen        = t_token_generation / n_decoded;
        const double n_gen_second = 1e3 / t_token_generation * n_decoded;

        SLT_INF(*this,
                "\n"
                "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n"
                "       eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n"
                "      total time = %10.2f ms / %5d tokens\n",
                t_prompt_processing, n_prompt_tokens_processed, t_prompt, n_prompt_second,
                t_token_generation, n_decoded, t_gen, n_gen_second,
                t_prompt_processing + t_token_generation, n_prompt_tokens_processed + n_decoded);

        if (n_draft_total > 0) {
            const float draft_ratio = (float) n_draft_accepted / n_draft_total;
            SLT_CNT(*this,
                    "draft acceptance rate = %0.5f (%5d accepted / %5d generated)\n",
                    draft_ratio, n_draft_accepted, n_draft_total
            );
        }
    }

    json to_json(bool only_metrics = false) const {
        json res;

        res = {
            {"id",            id},
            {"n_ctx",         n_ctx},
            {"speculative",   can_speculate()},
            {"is_processing", is_processing()},
        };

        const auto & ptask = task ? task : task_prev;

        if (ptask) {
            res["id_task"]    = ptask->id;
            res["params"]     = ptask->params.to_json(only_metrics);
            res["next_token"] = {
                {
                    {"has_next_token", has_next_token},
                    {"has_new_line",   has_new_line},
                    {"n_remain",       n_remaining},
                    {"n_decoded",      n_decoded},
                }
            };

            if (!only_metrics) {
                res["prompt"]    = ptask->tokens.detokenize(ctx, true);
                res["generated"] = generated_text;
            }
        }

        return res;
    }

    void copy_state_to(server_slot & other) const {
        llama_memory_seq_rm(llama_get_memory(ctx), other.id, 0, -1);
        llama_memory_seq_cp(llama_get_memory(ctx), id, other.id, 0, -1);

        other.n_decoded   = n_decoded;
        other.n_remaining = n_remaining;
        other.i_batch     = i_batch;

        other.n_prompt_tokens_cache     = n_prompt_tokens_cache;
        other.n_prompt_tokens_processed = n_prompt_tokens_processed;

        other.prompt = prompt.clone();
    }
};

//
// server_metrics
//

struct server_metrics {
    int64_t t_start = 0;

    uint64_t n_prompt_tokens_processed_total = 0;
    uint64_t t_prompt_processing_total       = 0;
    uint64_t n_tokens_predicted_total        = 0;
    uint64_t t_tokens_generation_total       = 0;

    uint64_t n_tokens_max = 0;

    uint64_t n_prompt_tokens_processed = 0;
    uint64_t t_prompt_processing       = 0;

    uint64_t n_tokens_predicted  = 0;
    uint64_t t_tokens_generation = 0;

    uint64_t n_decode_total     = 0;
    uint64_t n_busy_slots_total = 0;

    void init() {
        t_start = ggml_time_us();
    }

    void on_prompt_eval(const server_slot & slot) {
        n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed;
        n_prompt_tokens_processed       += slot.n_prompt_tokens_processed;
        t_prompt_processing             += slot.t_prompt_processing;
        t_prompt_processing_total       += slot.t_prompt_processing;

        n_tokens_max = std::max(n_tokens_max, (uint64_t) slot.prompt.n_tokens());
    }

    void on_prediction(const server_slot & slot) {
        n_tokens_predicted_total  += slot.n_decoded;
        n_tokens_predicted        += slot.n_decoded;
        t_tokens_generation       += slot.t_token_generation;
        t_tokens_generation_total += slot.t_token_generation;
    }

    void on_decoded(const std::vector<server_slot> & slots) {
        n_decode_total++;
        for (const auto & slot : slots) {
            if (slot.is_processing()) {
                n_busy_slots_total++;
            }
            n_tokens_max = std::max(n_tokens_max, (uint64_t) slot.prompt.n_tokens());
        }
    }

    void reset_bucket() {
        n_prompt_tokens_processed = 0;
        t_prompt_processing       = 0;
        n_tokens_predicted        = 0;
        t_tokens_generation       = 0;
    }
};
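
// note: the *_total counters accumulate over the server lifetime, while the bucketed counters
// can be cleared via reset_bucket() (presumably between metrics scrapes); e.g. an exporter
// could derive the average generation speed of the last bucket as
// n_tokens_predicted / (t_tokens_generation / 1e3) tokens per second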

//
// server_context_impl (private implementation)
//

struct server_context_impl {
    common_params params_base;

    // note: keep these alive - they determine the lifetime of the model, context, etc.
    common_init_result_ptr llama_init;
    common_init_result_ptr llama_init_dft;

    llama_model   * model = nullptr;
    llama_context * ctx   = nullptr;

    // multimodal
    mtmd_context * mctx = nullptr;

    const llama_vocab * vocab = nullptr;

    bool vocab_dft_compatible = true;

    llama_model * model_dft = nullptr;

    llama_context_params cparams_dft;

    llama_batch batch {};

    bool add_bos_token = true;

    int32_t n_ctx; // total context for all clients / slots

    // slots / clients
    std::vector<server_slot> slots;

    int slots_debug = 0;

    server_queue    queue_tasks;
    server_response queue_results;

    std::unique_ptr<server_prompt_cache> prompt_cache;

    server_metrics metrics;

    // Necessary similarity of prompt for slot selection
    float slot_prompt_similarity = 0.0f;

    std::string model_name; // name of the loaded model, to be used by API

    common_chat_templates_ptr chat_templates;

    oaicompat_parser_options oai_parser_opt;

    ~server_context_impl() {
        mtmd_free(mctx);

        // Clear any sampling context
        for (server_slot & slot : slots) {
            llama_free(slot.ctx_dft);
            slot.ctx_dft = nullptr;

            common_speculative_free(slot.spec);
            slot.spec = nullptr;

            llama_batch_free(slot.batch_spec);
        }

        llama_batch_free(batch);
    }

    // load the model and initialize llama_context
    bool load_model(const common_params & params) {
        SRV_INF("loading model '%s'\n", params.model.path.c_str());

        params_base = params;

        llama_init = common_init_from_params(params_base);

        model = llama_init->model();
        ctx   = llama_init->context();

        if (model == nullptr) {
            SRV_ERR("failed to load model, '%s'\n", params_base.model.path.c_str());
            return false;
        }

        vocab = llama_model_get_vocab(model);

        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_vocab_get_add_bos(vocab);

        if (params_base.has_speculative()) {
            SRV_INF("loading draft model '%s'\n", params_base.speculative.model.path.c_str());

            auto params_dft = params_base;

            params_dft.devices      = params_base.speculative.devices;
            params_dft.model        = params_base.speculative.model;
            params_dft.n_ctx        = params_base.speculative.n_ctx == 0 ? llama_n_ctx_seq(ctx) : params_base.speculative.n_ctx;
            params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
            params_dft.n_parallel   = 1;
            params_dft.cache_type_k = params_base.speculative.cache_type_k;
            params_dft.cache_type_v = params_base.speculative.cache_type_v;

            params_dft.cpuparams.n_threads       = params_base.speculative.cpuparams.n_threads;
            params_dft.cpuparams_batch.n_threads = params_base.speculative.cpuparams_batch.n_threads;
            params_dft.tensor_buft_overrides     = params_base.speculative.tensor_buft_overrides;

            llama_init_dft = common_init_from_params(params_dft);

            model_dft = llama_init_dft->model();

            if (model_dft == nullptr) {
                SRV_ERR("failed to load draft model, '%s'\n", params_base.speculative.model.path.c_str());
                return false;
            }

            vocab_dft_compatible = common_speculative_are_compatible(ctx, llama_init_dft->context());
            if (!vocab_dft_compatible) {
                SRV_INF("the draft model '%s' is not compatible with the target model '%s'. tokens will be translated between the draft and target models.\n", params_base.speculative.model.path.c_str(), params_base.model.path.c_str());
            }

            const int n_ctx_dft = llama_n_ctx(llama_init_dft->context());

            cparams_dft         = common_context_params_to_llama(params_dft);
            cparams_dft.n_batch = n_ctx_dft;

            // the context is not needed - we will create one for each slot
            llama_init_dft->free_context();
        }

        chat_templates = common_chat_templates_init(model, params_base.chat_template);
        try {
            common_chat_format_example(chat_templates.get(), params.use_jinja, params.default_template_kwargs);
        } catch (const std::exception & e) {
            SRV_WRN("%s: Chat template parsing error: %s\n", __func__, e.what());
            SRV_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
            chat_templates = common_chat_templates_init(model, "chatml");
        }

        std::string & mmproj_path = params_base.mmproj.path;
        if (!mmproj_path.empty()) {
            mtmd_helper_log_set(common_log_default_callback, nullptr);

            mtmd_context_params mparams = mtmd_context_params_default();
            mparams.use_gpu          = params_base.mmproj_use_gpu;
            mparams.print_timings    = false;
            mparams.n_threads        = params_base.cpuparams.n_threads;
            mparams.flash_attn_type  = params_base.flash_attn_type;
            mparams.warmup           = params_base.warmup;
            mparams.image_min_tokens = params_base.image_min_tokens;
            mparams.image_max_tokens = params_base.image_max_tokens;

            mctx = mtmd_init_from_file(mmproj_path.c_str(), model, mparams);
            if (mctx == nullptr) {
                SRV_ERR("failed to load multimodal model, '%s'\n", mmproj_path.c_str());
                return false;
            }
            SRV_INF("loaded multimodal model, '%s'\n", mmproj_path.c_str());

            if (params_base.ctx_shift) {
                params_base.ctx_shift = false;
                SRV_WRN("%s\n", "ctx_shift is not supported by multimodal, it will be disabled");
            }

            if (params_base.n_cache_reuse) {
                params_base.n_cache_reuse = 0;
                SRV_WRN("%s\n", "cache_reuse is not supported by multimodal, it will be disabled");
            }

            if (params_base.has_speculative()) {
                SRV_ERR("%s\n", "err: speculative decoding is not supported by multimodal");
                return false;
            }
        }

        if (!llama_memory_can_shift(llama_get_memory(ctx))) {
            if (params_base.ctx_shift) {
                params_base.ctx_shift = false;
                SRV_WRN("%s\n", "ctx_shift is not supported by this context, it will be disabled");
            }

            if (params_base.n_cache_reuse) {
                params_base.n_cache_reuse = 0;
                SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled");
            }
        }

        return true;
    }

    // initialize slots and server-related data
    void init() {
        // wiring up server queues
        queue_tasks.on_new_task([this](server_task && task) {
            process_single_task(std::move(task));
        });
        queue_tasks.on_update_slots([this]() {
            update_slots();
        });

        // Necessary similarity of prompt for slot selection
        slot_prompt_similarity = params_base.slot_prompt_similarity;

        // setup slots
        SRV_INF("initializing slots, n_slots = %d\n", params_base.n_parallel);

        const int n_ctx_train = llama_model_n_ctx_train(model);

        int n_ctx_slot = llama_n_ctx_seq(ctx);
        if (n_ctx_slot > n_ctx_train) {
            SRV_WRN("the slot context (%d) exceeds the training context of the model (%d) - capping\n", n_ctx_slot, n_ctx_train);
            n_ctx_slot = n_ctx_train;
        }

        for (int i = 0; i < params_base.n_parallel; i++) {
            server_slot slot;

            slot.id    = i;
            slot.ctx   = ctx;
            slot.n_ctx = n_ctx_slot;
            slot.mctx  = mctx;
            slot.prompt.tokens.has_mtmd = mctx != nullptr;

            if (model_dft) {
                slot.batch_spec = llama_batch_init(params_base.speculative.n_max + 1, 0, 1);

                // TODO: rework speculative decoding [TAG_SERVER_SPEC_REWORK]
                slot.ctx_dft = llama_init_from_model(model_dft, cparams_dft);
                if (slot.ctx_dft == nullptr) {
                    SRV_ERR("%s", "failed to create draft context\n");
                    return;
                }

                slot.spec = common_speculative_init(slot.ctx, slot.ctx_dft);
                if (slot.spec == nullptr) {
                    SRV_ERR("%s", "failed to create speculator\n");
                    return;
                }
                for (auto & pair : params_base.speculative.replacements) {
                    common_speculative_add_replacement_tgt_dft(slot.spec, pair.first.c_str(), pair.second.c_str());
                }
            }

            SLT_INF(slot, "new slot, n_ctx = %d\n", slot.n_ctx);

            slot.callback_on_release = [this](int) {
                queue_tasks.pop_deferred_task();
            };

            slot.reset();

            slots.push_back(std::move(slot));
        }

        {
            const char * LLAMA_SERVER_SLOTS_DEBUG = getenv("LLAMA_SERVER_SLOTS_DEBUG");
            slots_debug = LLAMA_SERVER_SLOTS_DEBUG ? atoi(LLAMA_SERVER_SLOTS_DEBUG) : 0;

            if (slots_debug) {
                SRV_WRN("slots debug = %d\n", slots_debug);
            }
        }
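
        // illustrative usage: `LLAMA_SERVER_SLOTS_DEBUG=1 llama-server ...` - any non-zero
        // integer enables extra per-slot debug output (presumably surfaced via the /slots
        // endpoint)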

        // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
        // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
        {
            const int32_t n_batch = llama_n_batch(ctx);
            batch = llama_batch_init(std::max(n_batch, params_base.n_parallel), 0, 1);
        }

        metrics.init();

        if (params_base.cache_ram_mib != 0) {
            if (params_base.cache_ram_mib < 0) {
                SRV_WRN("prompt cache is enabled, size limit: %s\n", "no limit");
            } else {
                SRV_WRN("prompt cache is enabled, size limit: %d MiB\n", params_base.cache_ram_mib);
            }
            SRV_WRN("%s", "use `--cache-ram 0` to disable the prompt cache\n");

            prompt_cache = std::make_unique<server_prompt_cache>(params_base.cache_ram_mib, n_ctx);
        } else {
            SRV_WRN("%s", "prompt cache is disabled - use `--cache-ram N` to enable it\n");
        }
        SRV_WRN("%s", "for more info see https://github.com/ggml-org/llama.cpp/pull/16391\n");

        if (!params_base.model_alias.empty()) {
            // user explicitly specified model name
            model_name = params_base.model_alias;
        } else if (!params_base.model.name.empty()) {
            // use model name in registry format (for models in cache)
            model_name = params_base.model.name;
        } else {
            // fallback: derive model name from file name
            auto model_path = std::filesystem::path(params_base.model.path);
            model_name = model_path.filename().string();
        }

        // thinking is enabled if:
        // 1. jinja templates are used
        // 2. it's not explicitly disabled (reasoning_budget != 0)
        // 3. the chat template supports it
        const bool enable_thinking = params_base.use_jinja && params_base.reasoning_budget != 0 && common_chat_templates_support_enable_thinking(chat_templates.get());
        SRV_INF("thinking = %d\n", enable_thinking);

        oai_parser_opt = {
            /* use_jinja             */ params_base.use_jinja,
            /* prefill_assistant     */ params_base.prefill_assistant,
            /* reasoning_format      */ params_base.reasoning_format,
            /* chat_template_kwargs  */ params_base.default_template_kwargs,
            /* common_chat_templates */ chat_templates.get(),
            /* allow_image           */ mctx ? mtmd_support_vision(mctx) : false,
            /* allow_audio           */ mctx ? mtmd_support_audio (mctx) : false,
            /* enable_thinking       */ enable_thinking,
            /* media_path            */ params_base.media_path,
        };

        // print sample chat example to make it clear which template is used
        LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
                common_chat_templates_source(chat_templates.get()),
                common_chat_format_example(chat_templates.get(), params_base.use_jinja, params_base.default_template_kwargs).c_str());
    }

    server_slot * get_slot_by_id(int id) {
        for (server_slot & slot : slots) {
            if (slot.id == id) {
                return &slot;
            }
        }

        return nullptr;
    }

    server_slot * get_available_slot(const server_task & task) {
        server_slot * ret = nullptr;

        bool update_cache = false;

        // find the slot that has at least n% prompt similarity
        if (ret == nullptr && slot_prompt_similarity != 0.0f) {
            float sim_best = 0;

            for (server_slot & slot : slots) {
                // skip the slot if it is not available
                if (slot.is_processing()) {
                    continue;
                }

                const auto & tokens = slot.prompt.tokens;

                // skip the slot if it does not contain cached tokens
                if (tokens.empty()) {
                    continue;
                }

                // fraction of the Longest Common Prefix length with respect to the input prompt length
                const float sim_cur = float(tokens.get_common_prefix(task.tokens)) / task.tokens.size();

                // select the current slot if the criteria match
                if (sim_cur > sim_best && sim_cur > slot_prompt_similarity) {
                    sim_best = sim_cur;

                    ret = &slot;
                }
            }

            if (ret != nullptr) {
                const float f_keep = (sim_best*task.tokens.size()) / ret->prompt.tokens.size();

                SLT_INF(*ret, "selected slot by LCP similarity, sim_best = %.3f (> %.3f thold), f_keep = %.3f\n",
                        sim_best, slot_prompt_similarity, f_keep);

                // if we are about to lose a large portion of the existing context - save it in the prompt cache
                if (f_keep < 0.5f) {
                    update_cache = true;
                }
            }
        }
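
        // illustrative example: a task prompt of 100 tokens and a slot cache of 400 tokens
        // sharing a common prefix of 80 tokens gives sim_cur = 80/100 = 0.8 and
        // f_keep = 80/400 = 0.2; since f_keep < 0.5, most of the cached context would be
        // discarded, so it is saved to the prompt cache first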

        // find the slot that has been least recently used
        if (ret == nullptr) {
            int64_t t_last = -1;

            for (server_slot & slot : slots) {
                // skip the slot if it is not available
                if (slot.is_processing()) {
                    continue;
                }

                // select the current slot if the criteria match
                if (!ret || slot.t_last_used <= t_last) {
                    t_last = slot.t_last_used;
                    ret = &slot;
                }
            }

            if (ret != nullptr) {
                SLT_INF(*ret, "selected slot by LRU, t_last = %" PRId64 "\n", t_last);

                update_cache = true;
            }
        }

        if (ret) {
            const auto & tokens = ret->prompt.tokens;

            update_cache = update_cache && prompt_cache;

            // cache prompts only for completion tasks
            update_cache = update_cache && task.type == SERVER_TASK_TYPE_COMPLETION;

            // don't update the cache if the slot's context is empty
            update_cache = update_cache && tokens.size() > 0;

            // TODO: mtmd does not support prompt cache
            update_cache = update_cache && (ret->mctx == nullptr);

            if (update_cache) {
                SRV_WRN("%s", "updating prompt cache\n");

                const int64_t t_start = ggml_time_us();

                ret->prompt_save(*prompt_cache);

                if (!ret->prompt_load(*prompt_cache, task.tokens)) {
                    clear_slot(*ret);
                }

                prompt_cache->update();

                SRV_WRN("prompt cache update took %.2f ms\n", (ggml_time_us() - t_start) / 1000.0);
            }
        }

        return ret;
    }

    void clear_slot(server_slot & slot) const {
        GGML_ASSERT(!slot.is_processing());

        SLT_WRN(slot, "clearing slot with %zu tokens\n", slot.prompt.tokens.size());

        llama_memory_seq_rm(llama_get_memory(ctx), slot.id, -1, -1);
        slot.prompt.tokens.clear();
    }

    // return true if at least one slot has been cleared
    // TODO: improve logic
    //       - smarter decision which slot to clear (LRU or longest prompt?)
    //       - move slot to level 2 cache instead of removing?
    //       - instead of purging, try to store and resume later?
    bool try_clear_idle_slots() {
        bool res = false;

        if (!params_base.kv_unified) {
            return res;
        }

        for (auto & slot : slots) {
            if (slot.is_processing()) {
                continue;
            }

            if (slot.prompt.n_tokens() > 0) {
                SRV_WRN("purging slot %d with %zu tokens\n", slot.id, slot.prompt.tokens.size());

                clear_slot(slot);

                res = true;

                // clear slots one by one
                break;
            }
        }

        return res;
    }
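
    // note: this only applies to a unified KV cache (--kv-unified), where all slots share one
    // buffer; purging idle slots one at a time frees space for the slots that are processing
    // without discarding more cached context than necessary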

    bool launch_slot_with_task(server_slot & slot, server_task && task) {
        slot.reset();

        if (!are_lora_equal(task.params.lora, slot.lora)) {
            // if lora has changed, check to see if the cache should be cleared
            if (lora_should_clear_cache(slot.lora, task.params.lora)) {
                SLT_INF(slot, "clearing cache for lora change. %zu loras -> %zu loras\n", slot.lora.size(), task.params.lora.size());
                slot.prompt.tokens.clear();
            } else {
                SLT_INF(slot, "keeping cache for alora. %zu target loras\n", task.params.lora.size());
            }
            slot.lora = task.params.lora;
        }

        // if using alora, make sure it's only a single one requested and active
        size_t alora_invocation_start = task.tokens.size();
        if (lora_all_alora(slot.lora)) {
            const auto & enabled_ids = lora_get_enabled_ids(slot.lora);

            // TODO: This will error out if a user requests two aloras, but only
            //       provides the activation string for one. We could instead search
            //       for all requested alora activation strings and then either keep
            //       only the last one, or reject if multiple are found.
            if (enabled_ids.size() != 1) {
                send_error(task, "Cannot run multiple aLoRAs in a single request", ERROR_TYPE_INVALID_REQUEST);
                return false;
            }
            const auto & lora = slot.lora[enabled_ids[0]].ptr;

            // get the pointer and count for the invocation tokens
            const uint64_t      n_invocation_tokens = llama_adapter_get_alora_n_invocation_tokens(lora);
            const llama_token * invocation_tokens   = llama_adapter_get_alora_invocation_tokens  (lora);

            // scan backwards through the prompt tokens to find the last
            // occurrence of the invocation sequence
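            // illustrative example: invocation sequence [A, B, C] and prompt
            // [X, A, B, C, Y, A, B, C] -> the scan walks right-to-left, matching C, B, A of
            // the last occurrence first, so alora_invocation_start ends up at index 5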
            int match_idx = static_cast<int>(n_invocation_tokens) - 1;
            for (int i = task.tokens.size() - 1; i >= 0; --i) {
                // the token in this position matches the next token to find in
                // the invocation sequence
                if (task.tokens[i] == invocation_tokens[match_idx]) {
                    // if it's a full match, we've found the start
                    if (match_idx == 0) {
                        alora_invocation_start = i;
                        break;
                    }
                    // otherwise, check the next token in the sequence
                    --match_idx;
                } else {
                    // no match in this position, so start looking over again
                    match_idx = static_cast<int>(n_invocation_tokens) - 1;
                }
            }

            // if the activation string is not found, disable the alora
            if (alora_invocation_start == task.tokens.size()) {
                SLT_DBG(slot, "alora %zu requested, but not found. deactivating\n", enabled_ids[0]);
                slot.lora[enabled_ids[0]].scale = 0.0f;
            } else {
                SLT_DBG(slot, "alora %zu activated starting at %zu\n", enabled_ids[0], alora_invocation_start);
                slot.alora_invocation_start = alora_invocation_start;
            }
        }

        if (!task.tokens.validate(ctx)) {
            send_error(task, "Prompt contains invalid tokens", ERROR_TYPE_INVALID_REQUEST);
            return false;
        }

        SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str());

        // initialize samplers
        {
            slot.smpl.reset(common_sampler_init(model, task.params.sampling));
            if (slot.smpl == nullptr) {
                // for now, the only error that may happen here is invalid grammar
                send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST);
                return false;
            }

            SLT_INF(slot, "sampler chain: %s\n", common_sampler_print(slot.smpl.get()).c_str());
        }

        // initialize draft batch
        // TODO: rework speculative decoding [TAG_SERVER_SPEC_REWORK]
        if (slot.ctx_dft) {
            llama_batch_free(slot.batch_spec);

            slot.batch_spec = llama_batch_init(task.params.speculative.n_max + 1, 0, 1);
        }

        slot.task = std::make_unique<const server_task>(std::move(task));

        slot.state = slot.is_child()
            ? SLOT_STATE_WAIT_OTHER // wait for the parent to process prompt
            : SLOT_STATE_STARTED;

        SLT_INF(slot, "%s", "processing task\n");

        return true;
    }

    bool process_token(completion_token_output & result, server_slot & slot) {
        // remember which tokens were sampled - used for repetition penalties during sampling
        const std::string token_str = result.text_to_send;
        slot.sampled = result.tok;

        slot.generated_text += token_str;
        if (slot.task->params.return_tokens) {
            slot.generated_tokens.push_back(result.tok);
        }
        slot.has_next_token = true;

        // check if there is incomplete UTF-8 character at the end
        bool incomplete = validate_utf8(slot.generated_text) < slot.generated_text.size();

        // search stop word and delete it
        if (!incomplete) {
            size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());

            const std::string str_test = slot.generated_text.substr(pos);
            bool send_text = true;

            size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), true);
            if (stop_pos != std::string::npos) {
                slot.generated_text.erase(
                    slot.generated_text.begin() + pos + stop_pos,
                    slot.generated_text.end());
                pos = std::min(slot.n_sent_text, slot.generated_text.size());
            } else if (slot.has_next_token && !llama_vocab_is_eog(vocab, result.tok)) {
                stop_pos = slot.find_stopping_strings(str_test, token_str.size(), false);
                send_text = stop_pos == std::string::npos;
            }

            // check if there is any token to predict
            if (send_text) {
                // do not send the stop word in the response
                result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
                slot.n_sent_text += result.text_to_send.size();
                // add the token to slot queue and cache
            } else {
                result.text_to_send = "";
            }

            slot.add_token(result);
            if (slot.task->params.stream) {
                send_partial_response(slot, result, false);
            }
        }

        if (incomplete) {
            slot.has_next_token = true;
        }

        // if context shifting is disabled, make sure that we don't run out of context
        if (!params_base.ctx_shift && slot.prompt.n_tokens() + 1 >= slot.n_ctx) {
            slot.truncated      = true;
            slot.stop           = STOP_TYPE_LIMIT;
            slot.has_next_token = false;

            SLT_DBG(slot, "stopped due to running out of context capacity, prompt.n_tokens() = %d, task.n_tokens = %d, n_decoded = %d, n_ctx = %d\n",
                    slot.prompt.n_tokens(), slot.task->n_tokens(), slot.n_decoded, slot.n_ctx);
        }

        // check the limits
        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params_base)) {
            slot.stop           = STOP_TYPE_LIMIT;
            slot.has_next_token = false;

            SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.task->params.n_predict);
        }

        if (slot.has_new_line) {
            // require that each new line has a whitespace prefix (i.e. indentation) of at least slot.params.n_indent
            if (slot.task->params.n_indent > 0) {
                // check the current indentation
                // TODO: improve by not doing it more than once for each new line
                if (slot.last_nl_pos > 0) {
                    size_t pos = slot.last_nl_pos;

                    int n_indent = 0;
                    while (pos < slot.generated_text.size() && (slot.generated_text[pos] == ' ' || slot.generated_text[pos] == '\t')) {
                        n_indent++;
                        pos++;
                    }

                    if (pos < slot.generated_text.size() && n_indent < slot.task->params.n_indent) {
                        slot.stop           = STOP_TYPE_LIMIT;
                        slot.has_next_token = false;

                        // cut the last line
                        slot.generated_text.erase(pos, std::string::npos);

                        SLT_DBG(slot, "stopped by indentation limit, n_decoded = %d, n_indent = %d\n", slot.n_decoded, n_indent);
                    }
                }

                // find the next new line
                {
                    const size_t pos = slot.generated_text.find('\n', slot.last_nl_pos);

                    if (pos != std::string::npos) {
                        slot.last_nl_pos = pos + 1;
                    }
                }
            }
        }

        // check if there is a new line in the generated text
        if (result.text_to_send.find('\n') != std::string::npos) {
            slot.has_new_line = true;

            // if we have seen a new line, we stop after a certain time limit, but only upon another new line
            if (slot.task->params.t_max_predict_ms > 0 && (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.task->params.t_max_predict_ms)) {
                slot.stop           = STOP_TYPE_LIMIT;
                slot.has_next_token = false;

                SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.task->params.t_max_predict_ms);
            }
        }

        if (llama_vocab_is_eog(vocab, result.tok)) {
            slot.stop           = STOP_TYPE_EOS;
            slot.has_next_token = false;

            SLT_DBG(slot, "%s", "stopped by EOS\n");
        }

        SLT_DBG(slot, "n_decoded = %d, n_remaining = %d, next token: %5d '%s'\n", slot.n_decoded, slot.n_remaining, result.tok, token_str.c_str());

        return slot.has_next_token; // continue
    }

    void populate_token_probs(const server_slot & slot, completion_token_output & result, bool post_sampling, bool special, int idx) const {
        const size_t n_probs = slot.task->params.sampling.n_probs;

        if (post_sampling) {
            const auto * cur_p = common_sampler_get_candidates(slot.smpl.get(), true);
            const size_t max_probs = cur_p->size;

            // set probability for sampled token
            for (size_t i = 0; i < max_probs; i++) {
                if (cur_p->data[i].id == result.tok) {
                    result.prob = cur_p->data[i].p;
                    break;
                }
            }

            // set probability for top n_probs tokens
            result.probs.reserve(max_probs);
            for (size_t i = 0; i < std::min(max_probs, n_probs); i++) {
                result.probs.push_back({
                    cur_p->data[i].id,
                    common_token_to_piece(ctx, cur_p->data[i].id, special),
                    cur_p->data[i].p
                });
            }
        } else {
            // TODO: optimize this with min-p optimization
            std::vector<llama_token_data> cur = get_token_probabilities(ctx, idx);

            // set probability for sampled token
            for (size_t i = 0; i < cur.size(); i++) {
                if (cur[i].id == result.tok) {
                    result.prob = cur[i].p;
                    break;
                }
            }

            // set probability for top n_probs tokens
            result.probs.reserve(n_probs);
            for (size_t i = 0; i < std::min(cur.size(), n_probs); i++) {
                result.probs.push_back({
                    cur[i].id,
                    common_token_to_piece(ctx, cur[i].id, special),
                    cur[i].p
                });
            }
        }
    }
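
    // note: with post_sampling the probabilities come from the sampler's candidate list, i.e.
    // after the sampler chain (temperature, top-k, penalties, ...) has reshaped the
    // distribution, while the pre-sampling path reads the raw model probabilities at batch
    // position idx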

    void send_error(const server_task & task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
        send_error(task.id, error, type);
    }

    void send_error(const server_slot & slot, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
        send_error(slot.task->id, error, type, slot.task->n_tokens(), slot.n_ctx);
    }

    void send_error(const int id_task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER, const int32_t n_prompt_tokens = 0, const int32_t n_ctx = 0) {
        SRV_ERR("task id = %d, error: %s\n", id_task, error.c_str());

        if (type == ERROR_TYPE_EXCEED_CONTEXT_SIZE) {
            GGML_ASSERT(n_ctx > 0 && n_prompt_tokens > 0);
        }

        auto res = std::make_unique<server_task_result_error>();
        res->id              = id_task;
        res->err_type        = type;
        res->err_msg         = error;
        res->n_prompt_tokens = n_prompt_tokens;
        res->n_ctx           = n_ctx;

        queue_results.send(std::move(res));
    }

    // if multimodal is enabled, send an error and return false
    bool check_no_mtmd(const int id_task) {
        if (mctx) {
            send_error(id_task, "This feature is not supported by multimodal", ERROR_TYPE_NOT_SUPPORTED);
            return false;
        }

        return true;
    }
    void send_partial_response(server_slot & slot, const completion_token_output & tkn, bool is_progress) {
        auto res = std::make_unique<server_task_result_cmpl_partial>();

        res->id    = slot.task->id;
        res->index = slot.task->index;

        if (is_progress) {
            res->is_progress        = true;
            res->progress.total     = slot.task->n_tokens();
            res->progress.cache     = slot.n_prompt_tokens_cache;
            res->progress.processed = slot.prompt.tokens.size();
            res->progress.time_ms   = (ggml_time_us() - slot.t_start_process_prompt) / 1000;
        } else {
            res->content = tkn.text_to_send;
            res->tokens  = { tkn.tok };
        }

        res->n_decoded           = slot.n_decoded;
        res->n_prompt_tokens     = slot.task->n_tokens();
        res->post_sampling_probs = slot.task->params.post_sampling_probs;

        res->verbose           = slot.task->params.verbose;
        res->res_type          = slot.task->params.res_type;
        res->oaicompat_model   = slot.task->params.oaicompat_model;
        res->oaicompat_cmpl_id = slot.task->params.oaicompat_cmpl_id;

        // populate res.prob_output
        if (slot.task->params.sampling.n_probs > 0) {
            res->prob_output = tkn; // copy the token probs
        }

        // populate timings if this is the final response or if timings_per_token is enabled
        if (slot.stop != STOP_TYPE_NONE || slot.task->params.timings_per_token) {
            res->timings = slot.get_timings();
        }

        queue_results.send(std::move(res));
    }
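    // example (illustrative numbers, not part of the upstream logic): a progress
    // chunk for a 1000-token prompt with 600 tokens reused from cache, after 250
    // new tokens have been evaluated, carries progress.total = 1000,
    // progress.cache = 600, progress.processed = 850 (cached + newly processed)
    // and the wall time in ms since prompt processing started.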
    void send_final_response(server_slot & slot) {
        auto res = std::make_unique<server_task_result_cmpl_final>();

        res->id      = slot.task->id;
        res->id_slot = slot.id;
        res->index   = slot.task->index;

        // in stream mode, the content and tokens are already in the last partial chunk
        if (slot.task->params.stream) {
            res->content = "";
            res->tokens  = llama_tokens{};
        } else {
            res->content = std::move(slot.generated_text);
            res->tokens  = std::move(slot.generated_tokens);
        }

        res->timings         = slot.get_timings();
        res->prompt          = slot.task->tokens.detokenize(ctx, true);
        res->response_fields = std::move(slot.task->params.response_fields);

        res->truncated       = slot.truncated;
        res->n_decoded       = slot.n_decoded;
        res->n_prompt_tokens = slot.task->n_tokens();
        res->n_tokens_cached = slot.prompt.n_tokens();
        res->has_new_line    = slot.has_new_line;
        res->stopping_word   = slot.stopping_word;
        res->stop            = slot.stop;

        res->post_sampling_probs = slot.task->params.post_sampling_probs;

        res->verbose       = slot.task->params.verbose;
        res->stream        = slot.task->params.stream;
        res->include_usage = slot.task->params.include_usage;

        res->res_type          = slot.task->params.res_type;
        res->oaicompat_model   = slot.task->params.oaicompat_model;
        res->oaicompat_cmpl_id = slot.task->params.oaicompat_cmpl_id;

        // populate res.probs_output
        if (slot.task->params.sampling.n_probs > 0) {
            if (!slot.task->params.stream && slot.stop == STOP_TYPE_WORD) {
                const llama_tokens stop_word_toks = common_tokenize(ctx, slot.stopping_word, false);

                size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
                res->probs_output = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin(),
                        slot.generated_token_probs.end() - safe_offset);
            } else {
                res->probs_output = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin(),
                        slot.generated_token_probs.end());
            }
        }

        res->generation_params = slot.task->params; // copy the parameters

        queue_results.send(std::move(res));
    }
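    // example (illustrative, not part of the upstream logic): the safe_offset
    // trimming above drops the stop word's own tokens from the returned
    // probabilities. If 10 token-prob entries were generated and the stop word
    // "###" tokenizes to 2 tokens, probs_output keeps the first 8 entries;
    // safe_offset is clamped to the number of generated entries so a stop word
    // longer than the generation cannot underflow the iterator range.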
    void send_embedding(const server_slot & slot, const llama_batch & batch) {
        auto res = std::make_unique<server_task_result_embd>();

        res->id       = slot.task->id;
        res->index    = slot.task->index;
        res->n_tokens = slot.task->n_tokens();
        res->res_type = slot.task->params.res_type;

        const int n_embd = llama_model_n_embd(model);

        std::vector<float> embd_res(n_embd, 0.0f);

        for (int i = 0; i < batch.n_tokens; ++i) {
            if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) {
                continue;
            }

            const float * embd = nullptr;
            if (llama_pooling_type(slot.ctx) == LLAMA_POOLING_TYPE_NONE) {
                embd = llama_get_embeddings_ith(ctx, i);
            } else {
                embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
            }

            if (embd == nullptr) {
                SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]);

                res->embedding.push_back(std::vector<float>(n_embd, 0.0f));
                continue;
            }

            // normalize only when there is pooling
            if (llama_pooling_type(slot.ctx) != LLAMA_POOLING_TYPE_NONE) {
                common_embd_normalize(embd, embd_res.data(), n_embd, slot.task->params.embd_normalize);
                res->embedding.push_back(embd_res);
                break;
            }

            res->embedding.emplace_back(embd, embd + n_embd);
        }

        SLT_DBG(slot, "%s", "sending embeddings\n");

        queue_results.send(std::move(res));
    }
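    // example (illustrative, not part of the upstream logic): with any pooling
    // type other than NONE there is a single pooled vector per sequence, which
    // is normalized before being returned; assuming embd_normalize = 2 selects
    // the Euclidean norm in common_embd_normalize, a vector [3, 4] would be
    // scaled to [0.6, 0.8]. With LLAMA_POOLING_TYPE_NONE, one raw
    // (unnormalized) vector per output token is returned instead.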
    void send_rerank(const server_slot & slot, const llama_batch & batch) {
        auto res = std::make_unique<server_task_result_rerank>();

        res->id       = slot.task->id;
        res->index    = slot.task->index;
        res->n_tokens = slot.task->n_tokens();

        for (int i = 0; i < batch.n_tokens; ++i) {
            if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) {
                continue;
            }

            const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
            if (embd == NULL) {
                embd = llama_get_embeddings_ith(ctx, i);
            }

            if (embd == NULL) {
                SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]);

                res->score = -1e6;
                continue;
            }

            res->score = embd[0];
        }

        SLT_DBG(slot, "sending rerank result, res.score = %f\n", res->score);

        queue_results.send(std::move(res));
    }
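    // note (illustrative, not part of the upstream logic): for reranking models
    // the pooled output is typically a single relevance score rather than a full
    // embedding, which is why res->score is read from embd[0]; the -1e6 sentinel
    // above marks sequences whose embeddings could not be fetched.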
    //
    // Functions to process the task
    //

    // tokenize the input if it's set by CLI, return false on error
    bool tokenize_cli_input(server_task & task) {
        if (task.cli_input == nullptr) {
            return true; // nothing to do
        }

        try {
            auto & opt = oai_parser_opt;

            common_chat_templates_inputs inputs;
            inputs.messages              = common_chat_msgs_parse_oaicompat(task.cli_input);
            inputs.tools                 = {}; // TODO
            inputs.tool_choice           = COMMON_CHAT_TOOL_CHOICE_NONE;
            inputs.json_schema           = ""; // TODO
            inputs.grammar               = ""; // TODO
            inputs.use_jinja             = opt.use_jinja;
            inputs.parallel_tool_calls   = false;
            inputs.add_generation_prompt = true;
            inputs.reasoning_format      = opt.reasoning_format;
            inputs.enable_thinking       = opt.enable_thinking;

            // apply the chat template to the list of messages
            auto chat_params = common_chat_templates_apply(opt.tmpls, inputs);

            // tokenize the resulting prompt
            auto & prompt = chat_params.prompt;
            if (mctx != nullptr) {
                task.tokens = process_mtmd_prompt(mctx, prompt, task.cli_files);
            } else {
                task.tokens = std::move(tokenize_input_prompts(vocab, mctx, prompt, true, true)[0]);
            }

            task.cli_input.clear();
            task.cli_files.clear();
        } catch (const std::exception & e) {
            send_error(task, std::string("Failed to format input: ") + e.what(), ERROR_TYPE_INVALID_REQUEST);
            return false;
        }

        return true;
    }
    void process_single_task(server_task && task) {
        switch (task.type) {
            case SERVER_TASK_TYPE_COMPLETION:
            case SERVER_TASK_TYPE_INFILL:
            case SERVER_TASK_TYPE_EMBEDDING:
            case SERVER_TASK_TYPE_RERANK:
                {
                    if (!tokenize_cli_input(task)) {
                        break;
                    }

                    const int id_slot = task.id_slot;

                    server_slot * slot = id_slot != -1 ? get_slot_by_id(id_slot) : get_available_slot(task);

                    if (slot == nullptr) {
                        // if no slot is available, we defer this task for processing later
                        SRV_DBG("no slot is available, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(std::move(task));
                        break;
                    }

                    if (slot->is_processing()) {
                        // if the requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(std::move(task));
                        break;
                    }

                    if (!launch_slot_with_task(*slot, std::move(task))) {
                        SRV_ERR("failed to launch slot with task, id_task = %d\n", task.id);
                        break;
                    }
                } break;
            case SERVER_TASK_TYPE_CANCEL:
                {
                    // release the slot linked with the task id
                    for (auto & slot : slots) {
                        if (slot.task && slot.task->id == task.id_target) {
                            slot.release();
                            break;
                        }
                    }
                } break;
            case SERVER_TASK_TYPE_NEXT_RESPONSE:
                {
                    // do nothing
                } break;
            case SERVER_TASK_TYPE_METRICS:
                {
                    json slots_data = json::array();

                    int n_idle_slots       = 0;
                    int n_processing_slots = 0;

                    for (server_slot & slot : slots) {
                        json slot_data = slot.to_json(slots_debug == 0);

                        if (slot.is_processing()) {
                            n_processing_slots++;
                        } else {
                            n_idle_slots++;
                        }

                        slots_data.push_back(slot_data);
                    }
                    SRV_DBG("n_idle_slots = %d, n_processing_slots = %d\n", n_idle_slots, n_processing_slots);

                    auto res = std::make_unique<server_task_result_metrics>();
                    res->id                 = task.id;
                    res->slots_data         = std::move(slots_data);
                    res->n_idle_slots       = n_idle_slots;
                    res->n_processing_slots = n_processing_slots;
                    res->n_tasks_deferred   = queue_tasks.queue_tasks_deferred_size();
                    res->t_start            = metrics.t_start;

                    res->n_prompt_tokens_processed_total = metrics.n_prompt_tokens_processed_total;
                    res->t_prompt_processing_total       = metrics.t_prompt_processing_total;
                    res->n_tokens_predicted_total        = metrics.n_tokens_predicted_total;
                    res->t_tokens_generation_total       = metrics.t_tokens_generation_total;

                    res->n_tokens_max = metrics.n_tokens_max;

                    res->n_prompt_tokens_processed = metrics.n_prompt_tokens_processed;
                    res->t_prompt_processing       = metrics.t_prompt_processing;
                    res->n_tokens_predicted        = metrics.n_tokens_predicted;
                    res->t_tokens_generation       = metrics.t_tokens_generation;

                    res->n_decode_total     = metrics.n_decode_total;
                    res->n_busy_slots_total = metrics.n_busy_slots_total;

                    if (task.metrics_reset_bucket) {
                        metrics.reset_bucket();
                    }

                    queue_results.send(std::move(res));
                } break;
            case SERVER_TASK_TYPE_SLOT_SAVE:
                {
                    if (!check_no_mtmd(task.id)) {
                        break;
                    }

                    int id_slot = task.slot_action.slot_id;
                    server_slot * slot = get_slot_by_id(id_slot);
                    if (slot == nullptr) {
                        send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    if (slot->is_processing()) {
                        // if the requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(std::move(task));
                        break;
                    }

                    const size_t  token_count = slot->prompt.tokens.size();
                    const int64_t t_start     = ggml_time_us();

                    std::string filename = task.slot_action.filename;
                    std::string filepath = task.slot_action.filepath;

                    const llama_tokens & tokens = slot->prompt.tokens.get_text_tokens();
                    const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id, tokens.data(), token_count);

                    const int64_t t_end     = ggml_time_us();
                    const double  t_save_ms = (t_end - t_start) / 1000.0;

                    auto res = std::make_unique<server_task_result_slot_save_load>();
                    res->id       = task.id;
                    res->id_slot  = id_slot;
                    res->filename = filename;
                    res->is_save  = true;
                    res->n_tokens = token_count;
                    res->n_bytes  = nwrite;
                    res->t_ms     = t_save_ms;
                    queue_results.send(std::move(res));
                } break;
            case SERVER_TASK_TYPE_SLOT_RESTORE:
                {
                    if (!check_no_mtmd(task.id)) {
                        break;
                    }
                    int id_slot = task.slot_action.slot_id;
                    server_slot * slot = get_slot_by_id(id_slot);
                    if (slot == nullptr) {
                        send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    if (slot->is_processing()) {
                        // if the requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(std::move(task));
                        break;
                    }

                    const int64_t t_start = ggml_time_us();

                    std::string filename = task.slot_action.filename;
                    std::string filepath = task.slot_action.filepath;

                    llama_tokens tokens;
                    tokens.resize(slot->n_ctx);
                    size_t token_count = 0;
                    size_t nread = llama_state_seq_load_file(ctx, filepath.c_str(), slot->id, tokens.data(), tokens.size(), &token_count);
                    if (nread == 0) {
                        slot->prompt.tokens.clear(); // the KV cache may already have been invalidated
                        send_error(task, "Unable to restore slot, no available space in KV cache or invalid slot save file", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    tokens.resize(token_count);
                    slot->prompt.tokens.clear();
                    slot->prompt.tokens.insert(tokens);

                    const int64_t t_end        = ggml_time_us();
                    const double  t_restore_ms = (t_end - t_start) / 1000.0;

                    auto res = std::make_unique<server_task_result_slot_save_load>();
                    res->id       = task.id;
                    res->id_slot  = id_slot;
                    res->filename = filename;
                    res->is_save  = false;
                    res->n_tokens = token_count;
                    res->n_bytes  = nread;
                    res->t_ms     = t_restore_ms;
                    queue_results.send(std::move(res));
                } break;
            case SERVER_TASK_TYPE_SLOT_ERASE:
                {
                    if (!check_no_mtmd(task.id)) {
                        break;
                    }
                    int id_slot = task.slot_action.slot_id;
                    server_slot * slot = get_slot_by_id(id_slot);
                    if (slot == nullptr) {
                        send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    if (slot->is_processing()) {
                        // if the requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(std::move(task));
                        break;
                    }

                    // erase the token cache
                    const size_t n_erased = slot->prompt.tokens.size();

                    clear_slot(*slot);

                    auto res = std::make_unique<server_task_result_slot_erase>();
                    res->id       = task.id;
                    res->id_slot  = id_slot;
                    res->n_erased = n_erased;
                    queue_results.send(std::move(res));
                } break;
            case SERVER_TASK_TYPE_SET_LORA:
                {
                    params_base.lora_adapters = std::move(task.set_lora);
                    auto res = std::make_unique<server_task_result_apply_lora>();
                    res->id = task.id;
                    queue_results.send(std::move(res));
                } break;
        }
    }
    void update_slots() {
        // check if all slots are idle
        {
            bool all_idle = true;

            for (auto & slot : slots) {
                if (slot.is_processing()) {
                    all_idle = false;
                    break;
                }
            }

            if (all_idle) {
                SRV_INF("%s", "all slots are idle\n");
                return;
            }
        }

        {
            SRV_DBG("%s", "posting NEXT_RESPONSE\n");

            server_task task(SERVER_TASK_TYPE_NEXT_RESPONSE);
            task.id = queue_tasks.get_new_id();
            queue_tasks.post(std::move(task));
        }

        // apply context-shift if needed
        // TODO: simplify and improve
        for (server_slot & slot : slots) {
            if (slot.state == SLOT_STATE_GENERATING && slot.prompt.n_tokens() + 1 >= slot.n_ctx) {
                if (!params_base.ctx_shift) {
                    // this check is intentionally redundant
                    // we should never get here, because generation should have already stopped in process_token()
                    send_error(slot, "context shift is disabled", ERROR_TYPE_SERVER);
                    slot.release();
                    continue;
                }

                if (mctx) {
                    // we should never reach this because params_base.ctx_shift is automatically disabled if mmproj is loaded
                    // we don't support ctx_shift because an image chunk may contain multiple tokens
                    GGML_ABORT("not supported by multimodal");
                }

                if (slot.is_parent() || slot.is_child()) {
                    send_error(slot, "context shift cannot be used for shared prompts", ERROR_TYPE_SERVER);
                    slot.release();
                    continue;
                }

                // shift the context
                int n_keep = slot.task->params.n_keep < 0 ? slot.task->n_tokens() : slot.task->params.n_keep;

                if (add_bos_token) {
                    n_keep += 1;
                }

                n_keep = std::min(slot.n_ctx - 4, n_keep);

                const int n_left    = slot.prompt.n_tokens() - n_keep;
                const int n_discard = slot.task->params.n_discard ? slot.task->params.n_discard : (n_left / 2);

                SLT_WRN(slot, "slot context shift, n_keep = %d, n_left = %d, n_discard = %d\n", n_keep, n_left, n_discard);

                llama_memory_seq_rm (llama_get_memory(ctx), slot.id, n_keep            , n_keep + n_discard);
                llama_memory_seq_add(llama_get_memory(ctx), slot.id, n_keep + n_discard, slot.prompt.n_tokens(), -n_discard);

                // add generated tokens to cache
                // ref: https://github.com/ggml-org/llama.cpp/pull/16818#discussion_r2473269481
                {
                    GGML_ASSERT(!slot.prompt.tokens.has_mtmd);

                    llama_tokens new_tokens = slot.prompt.tokens.get_text_tokens(); // copy

                    for (size_t i = n_keep + n_discard; i < new_tokens.size(); i++) {
                        new_tokens[i - n_discard] = new_tokens[i];
                    }

                    new_tokens.resize(slot.prompt.tokens.size() - n_discard);

                    slot.prompt.tokens.clear();
                    slot.prompt.tokens.insert(new_tokens);
                }

                slot.truncated = true;
            }
        }
        // start populating the batch for this iteration
        common_batch_clear(batch);

        // track if a given slot can be batched with slots already in the batch
        server_slot * slot_batched = nullptr;

        auto accept_special_token = [&](server_slot & slot, llama_token token) {
            return params_base.special ||
                slot.task->params.sampling.preserved_tokens.find(token) != slot.task->params.sampling.preserved_tokens.end();
        };

        // first, add sampled tokens from any ongoing sequences
        for (auto & slot : slots) {
            if (slot.state != SLOT_STATE_GENERATING) {
                continue;
            }

            // check if we can batch this slot with the previous one
            if (!slot_batched) {
                slot_batched = &slot;
            } else if (!slot_batched->can_batch_with(slot)) {
                continue;
            }

            // generate draft tokens in speculative decoding mode
            // TODO: rework to have a single draft llama_context shared across all slots [TAG_SERVER_SPEC_REWORK]
            //       perform the speculative drafting for all sequences at the same time in a single batch
            int n_draft_max = slot.get_n_draft_max();
            if (n_draft_max > 0) {
                if (mctx) {
                    // we should never reach this, as speculative decoding is automatically disabled if mmproj is loaded
                    GGML_ABORT("not supported by multimodal");
                }

                struct common_speculative_params params_spec;
                params_spec.n_draft = n_draft_max;
                params_spec.n_reuse = llama_n_ctx(slot.ctx_dft) - slot.task->params.speculative.n_max;
                params_spec.p_min   = slot.task->params.speculative.p_min;

                const llama_tokens & cached_text_tokens = slot.prompt.tokens.get_text_tokens();

                llama_tokens draft = common_speculative_gen_draft(slot.spec, params_spec, cached_text_tokens, slot.sampled);

                // add the sampled token to the batch
                slot.i_batch_dft.push_back(batch.n_tokens);

                common_batch_add(batch, slot.sampled, slot.prompt.tokens.pos_next(), { slot.id }, true);
                slot.prompt.tokens.push_back(slot.sampled);

                if (slot.task->params.speculative.n_min > (int) draft.size()) {
                    SLT_DBG(slot, "ignoring small draft: %d < %d\n", (int) draft.size(), slot.task->params.speculative.n_min);

                    // fall back to normal decoding
                    slot.i_batch = slot.i_batch_dft[0];

                    slot.drafted.clear();
                    slot.i_batch_dft.clear();
                } else {
                    // keep track of the total number of drafted tokens that were tested
                    slot.n_draft_total += draft.size();

                    // add all drafted tokens to the batch
                    for (size_t i = 0; i < draft.size(); i++) {
                        slot.i_batch_dft.push_back(batch.n_tokens);

                        common_batch_add(batch, draft[i], slot.prompt.tokens.pos_next(), { slot.id }, true);
                        slot.prompt.tokens.push_back(draft[i]);
                    }

                    slot.drafted = std::move(draft);
                }
            } else {
                // no speculative decoding
                slot.i_batch = batch.n_tokens;

                common_batch_add(batch, slot.sampled, slot.prompt.tokens.pos_next(), { slot.id }, true);
                slot.prompt.tokens.push_back(slot.sampled);

                SLT_DBG(slot, "slot decode token, n_ctx = %d, n_tokens = %d, truncated = %d\n",
                        slot.n_ctx, slot.prompt.n_tokens(), slot.truncated);
            }
        }

        // process in chunks of params.n_batch
        int32_t n_batch  = llama_n_batch(ctx);
        int32_t n_ubatch = llama_n_ubatch(ctx);

        float  alora_scale       = -1.0f;
        size_t alora_disabled_id = 0;

        // next, batch any pending prompts without exceeding n_batch
        if (params_base.cont_batching || batch.n_tokens == 0) {
            for (auto & slot : slots) {
                if (!slot.is_processing()) {
                    continue;
                }

                // check if we can batch this slot with the previous one
                if (slot_batched && !slot_batched->can_batch_with(slot)) {
                    continue;
                }

                // this slot still has a prompt to be processed
                if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_STARTED) {
                    const auto & input_tokens = slot.task->tokens;

                    // TODO: maybe move branch to outside of this loop in the future
                    if (slot.state == SLOT_STATE_STARTED) {
                        slot.t_start_process_prompt = ggml_time_us();
                        slot.t_start_generation     = 0;

                        slot.state = SLOT_STATE_PROCESSING_PROMPT;

                        SLT_INF(slot, "new prompt, n_ctx_slot = %d, n_keep = %d, task.n_tokens = %d\n",
                                slot.n_ctx, slot.task->params.n_keep, slot.task->n_tokens());

                        // print prompt tokens (for debugging)
                        /*if (1) {
                            // first 16 tokens (avoid flooding logs)
                            for (int i = 0; i < std::min<int>(16, input_tokens.size()); i++) {
                                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, input_tokens[i], common_token_to_piece(ctx, input_tokens[i]).c_str());
                            }
                        } else {
                            // all
                            for (int i = 0; i < (int) input_tokens.size(); i++) {
                                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, input_tokens[i], common_token_to_piece(ctx, input_tokens[i]).c_str());
                            }
                        }*/

                        // keep track of how many tokens we can reuse from the previous state
                        int n_past = 0;

                        // empty prompt passed -> release the slot and send an empty response
                        if (input_tokens.empty()) {
                            SLT_WRN(slot, "%s", "empty prompt - releasing slot\n");

                            slot.print_timings();
                            send_final_response(slot);
                            slot.release();

                            continue;
                        }

                        // TODO: support memory-less logits computation
                        if (slot.need_logits() && !llama_get_memory(ctx)) {
                            send_error(slot, "the current context does not support logits computation, skipping", ERROR_TYPE_SERVER);
                            slot.release();
                            continue;
                        }

                        if (!slot.can_split()) {
                            if (slot.task->n_tokens() > n_ubatch) {
                                send_error(slot, "input is too large to process, increase the physical batch size", ERROR_TYPE_SERVER);
                                slot.release();
                                continue;
                            }

                            if (slot.task->n_tokens() > slot.n_ctx) {
                                send_error(slot, "input is larger than the max context size, skipping", ERROR_TYPE_EXCEED_CONTEXT_SIZE);
                                slot.release();
                                continue;
                            }
                        } else {
                            if (slot.task->n_tokens() >= slot.n_ctx) {
                                send_error(slot, "the request exceeds the available context size, try increasing it", ERROR_TYPE_EXCEED_CONTEXT_SIZE);
                                slot.release();
                                continue;
                            }

                            if (slot.task->params.cache_prompt) {
                                // reuse any previously computed tokens that are common with the new prompt
                                n_past = slot.prompt.tokens.get_common_prefix(input_tokens);

                                // if there is an alora invoked, don't cache after the invocation start
                                if (slot.alora_invocation_start > 0) {
                                    SLT_DBG(slot, "only caching to alora invocation start (n_past = %d, alora_invocation_start = %d)\n", n_past, slot.alora_invocation_start);
                                    n_past = std::min(n_past, slot.alora_invocation_start - 1);
                                }

                                const auto n_cache_reuse = slot.task->params.n_cache_reuse;

                                const bool can_cache_reuse =
                                    llama_memory_can_shift(llama_get_memory(ctx)) &&
                                    !slot.prompt.tokens.has_mtmd;

                                if (!can_cache_reuse && n_cache_reuse > 0) {
                                    SLT_WRN(slot, "cache reuse is not supported - ignoring n_cache_reuse = %d\n", n_cache_reuse);
                                }

                                // reuse chunks from the cached prompt by shifting their KV cache into the new position
                                if (can_cache_reuse && n_cache_reuse > 0) {
                                    GGML_ASSERT(!slot.prompt.tokens.has_mtmd);

                                    size_t head_c = n_past; // cache
                                    size_t head_p = n_past; // current prompt

                                    if (mctx) {
                                        // we should never reach this
                                        GGML_ABORT("not supported by multimodal");
                                    }

                                    SLT_DBG(slot, "trying to reuse chunks with size > %d, n_past = %d\n", n_cache_reuse, n_past);

                                    while (head_c < slot.prompt.tokens.size() &&
                                           head_p < input_tokens.size()) {
                                        size_t n_match = 0;
                                        while (head_c + n_match < slot.prompt.tokens.size() &&
                                               head_p + n_match < input_tokens.size() &&
                                               slot.prompt.tokens[head_c + n_match] == input_tokens[head_p + n_match]) {
                                            n_match++;
                                        }

                                        if (n_match >= (size_t) n_cache_reuse) {
                                            SLT_INF(slot, "reusing chunk with size %zu, shifting KV cache [%zu, %zu) -> [%zu, %zu)\n", n_match, head_c, head_c + n_match, head_p, head_p + n_match);
                                            //for (size_t i = head_p; i < head_p + n_match; i++) {
                                            //    SLT_DBG(slot, "cache token %3zu: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
                                            //}

                                            const int64_t kv_shift = (int64_t) head_p - (int64_t) head_c;

                                            llama_memory_seq_rm (llama_get_memory(ctx), slot.id, head_p, head_c);
                                            llama_memory_seq_add(llama_get_memory(ctx), slot.id, head_c, head_c + n_match, kv_shift);

                                            for (size_t i = 0; i < n_match; i++) {
                                                slot.prompt.tokens.set_token(head_p + i, slot.prompt.tokens[head_c + i]);
                                                n_past++;
                                            }

                                            head_c += n_match;
                                            head_p += n_match;
                                        } else {
                                            head_c += 1;
                                        }
                                    }

                                    SLT_DBG(slot, "after context reuse, new n_past = %d\n", n_past);
                                }
                            } else {
                                // if we don't cache the prompt, we have to remove all previous tokens
                                n_past = 0;
                            }

                            // note: when n_swa == 0, the model does not use SWA, which is equivalent to a window of 1
                            const auto n_swa = std::max(1, llama_model_n_swa(model));

                            // the largest pos_min required for a checkpoint to be useful
                            const auto pos_min_thold = std::max(0, n_past - n_swa);

                            // note: disallow with mtmd contexts for now
                            //       https://github.com/ggml-org/llama.cpp/issues/17043
                            if (!mctx && n_past > 0 && n_past < slot.prompt.n_tokens()) {
                                const auto pos_min = llama_memory_seq_pos_min(llama_get_memory(ctx), slot.id);
                                if (pos_min == -1) {
                                    SLT_ERR(slot, "n_past = %d, slot.prompt.tokens.size() = %d, seq_id = %d, pos_min = %d\n", n_past, (int) slot.prompt.tokens.size(), slot.id, pos_min);
                                    GGML_ABORT("pos_min == -1, but n_past > 0 - should not happen: https://github.com/ggml-org/llama.cpp/pull/13833#discussion_r2116181237");
                                }

                                // when the prompt prefix does not match, print the tokens around the mismatch
                                // this is useful for debugging prompt caching
                                if (slots_debug) {
                                    const int np0 = std::max<int>(n_past - 4, 0);
                                    const int np1 = std::min<int>(n_past + 6, std::min(slot.prompt.tokens.size(), slot.task->tokens.size()));

                                    std::stringstream ss0;
                                    std::stringstream ss1;

                                    std::stringstream st0;
                                    std::stringstream st1;

                                    ss0 << "old: ... ";
                                    ss1 << "new: ... ";

                                    for (int i = np0; i < np1; i++) {
                                        if (i == n_past) {
                                            ss0 << " | ";
                                            ss1 << " | ";
                                        }

                                        {
                                            const auto token = slot.prompt.tokens[i];
                                            const auto piece = token != LLAMA_TOKEN_NULL ? common_token_to_piece(ctx, token) : "[mtmd]";
                                            ss0 << piece;
                                            st0 << std::setw(8) << token;
                                        }

                                        {
                                            const auto token = slot.task->tokens[i];
                                            const auto piece = token != LLAMA_TOKEN_NULL ? common_token_to_piece(ctx, token) : "[mtmd]";
                                            ss1 << piece;
                                            st1 << std::setw(8) << token;
                                        }
                                    }

                                    SLT_WRN(slot, "%s\n", ss0.str().c_str());
                                    SLT_WRN(slot, "%s\n", ss1.str().c_str());

                                    SLT_WRN(slot, "%s\n", st0.str().c_str());
                                    SLT_WRN(slot, "%s\n", st1.str().c_str());
                                }

                                if (pos_min > pos_min_thold) {
                                    // TODO: support can be added in the future when corresponding vision models get released
                                    GGML_ASSERT(!slot.prompt.tokens.has_mtmd);

                                    SLT_WRN(slot, "n_past = %d, slot.prompt.tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", n_past, (int) slot.prompt.tokens.size(), slot.id, pos_min, n_swa);

                                    // search for a context checkpoint
                                    const auto it = std::find_if(
                                        slot.prompt.checkpoints.rbegin(),
                                        slot.prompt.checkpoints.rend(),
                                        [&](const auto & cur) {
                                            // guarantee that a checkpoint will result in at least one token being processed [TAG_PROMPT_LOGITS]
                                            return cur.pos_min < pos_min_thold;
                                        }
                                    );

                                    bool do_reset = it == slot.prompt.checkpoints.rend();
                                    if (!do_reset) {
                                        // restore the context checkpoint
                                        const size_t checkpoint_size = it->data.size();
                                        const size_t n = llama_state_seq_set_data_ext(ctx, it->data.data(), checkpoint_size, slot.id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY);
                                        if (n != checkpoint_size) {
                                            SLT_ERR(slot, "failed to restore context checkpoint (pos_min = %d, pos_max = %d, size = %.3f MiB)\n", it->pos_min, it->pos_max, (float) checkpoint_size / 1024 / 1024);
                                            do_reset = true;
                                            //printf("[DEBUG] `do_reset` was set to `true` after failing to restore a checkpoint");
                                        } else {
                                            n_past = std::min(n_past, std::max(it->pos_min + 1, it->pos_max));
                                            SLT_WRN(slot, "restored context checkpoint (pos_min = %d, pos_max = %d, size = %.3f MiB)\n", it->pos_min, it->pos_max, (float) checkpoint_size / 1024 / 1024);
                                        }
                                    }

                                    if (do_reset) {
                                        SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA or hybrid/recurrent memory, see %s)\n",
                                                "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");
                                        n_past = 0;
                                    }
                                }
                            }

                            {
                                // erase any checkpoints with pos_min > pos_min_thold
                                for (auto it = slot.prompt.checkpoints.begin(); it != slot.prompt.checkpoints.end();) {
                                    const auto & cur = *it;
                                    if (cur.pos_min > pos_min_thold) {
                                        SLT_WRN(slot, "erased invalidated context checkpoint (pos_min = %d, pos_max = %d, n_swa = %d, size = %.3f MiB)\n", cur.pos_min, cur.pos_max, n_swa, (float) cur.data.size() / 1024 / 1024);
                                        it = slot.prompt.checkpoints.erase(it);
                                    } else {
                                        ++it;
                                    }
                                }
                            }
                        }

                        // [TAG_PROMPT_LOGITS]
                        if (n_past == slot.task->n_tokens() && n_past > 0) {
                            SLT_WRN(slot, "need to evaluate at least 1 token for each active slot (n_past = %d, task.n_tokens() = %d)\n", n_past, slot.task->n_tokens());
                            n_past--;
                            SLT_WRN(slot, "n_past was set to %d\n", n_past);
                        }

                        slot.n_prompt_tokens_cache     = n_past;
                        slot.n_prompt_tokens_processed = 0;

                        slot.prompt.tokens.keep_first(n_past);
                    }
                    if (!slot.can_split()) {
                        // cannot fit the prompt in the current batch - will try next iter
                        if (batch.n_tokens + slot.task->n_tokens() > n_batch) {
                            continue;
                        }
                    }

                    // truncate any tokens that are beyond n_past for this slot
                    const llama_pos p0 = slot.prompt.tokens.pos_next();

                    SLT_INF(slot, "n_tokens = %d, memory_seq_rm [%d, end)\n", slot.prompt.n_tokens(), p0);

                    if (!llama_memory_seq_rm(llama_get_memory(ctx), slot.id, p0, -1)) {
                        SLT_WRN(slot, "failed to truncate tokens with position >= %d - clearing the memory\n", p0);

                        clear_slot(slot);

                        // there is no common part left
                        slot.n_prompt_tokens_cache = 0;
                    }

                    // check if we should process the image
                    if (slot.prompt.n_tokens() < slot.task->n_tokens() && input_tokens[slot.prompt.n_tokens()] == LLAMA_TOKEN_NULL) {
                        // process the image
                        size_t n_tokens_out = 0;
                        int32_t res = input_tokens.process_chunk(ctx, mctx, slot.prompt.n_tokens(), slot.prompt.tokens.pos_next(), slot.id, n_tokens_out);
                        if (res != 0) {
                            SLT_ERR(slot, "failed to process image, res = %d\n", res);
                            send_error(slot, "failed to process image", ERROR_TYPE_SERVER);
                            slot.release();
                            continue;
                        }

                        slot.n_prompt_tokens_processed += n_tokens_out;

                        // add the image chunk to cache
                        {
                            const auto & chunk = input_tokens.find_chunk(slot.prompt.n_tokens());
                            slot.prompt.tokens.push_back(chunk.get()); // copy
                        }
                    }

                    // if using an alora, there may be uncached tokens that come before the
                    // invocation sequence. when this happens, the tokens before the invocation
                    // sequence need to be processed without the adapter in a separate batch,
                    // then the adapter needs to be enabled for the remaining tokens.
                    if (lora_all_alora(slot.lora) && slot.alora_invocation_start - 1 > slot.prompt.n_tokens()) {
                        SLT_DBG(slot, "processing pre-alora tokens without the adapter (n_tokens = %d, alora_invocation_start = %d)\n", slot.prompt.n_tokens(), slot.alora_invocation_start);
                        const auto & enabled_loras = lora_get_enabled_ids(slot.lora);
                        GGML_ASSERT(enabled_loras.size() == 1);
                        alora_scale = slot.lora[enabled_loras[0]].scale;
                        slot.lora[enabled_loras[0]].scale = 0.0f;
                        alora_disabled_id = enabled_loras[0];
                    }

                    bool do_checkpoint = params_base.n_ctx_checkpoints > 0;

                    // make checkpoints only for completion tasks
                    do_checkpoint = do_checkpoint && slot.task->type == SERVER_TASK_TYPE_COMPLETION;

                    // make a checkpoint of the parts of the memory that cannot be rolled back.
                    // checkpoints are created only if:
                    // - the model uses SWA and we are not using `swa_full`
                    // - the model architecture is marked as recurrent or hybrid
                    //
                    // TODO: try to make this conditional on the context or the memory module, instead of the model type
                    do_checkpoint = do_checkpoint && (
                        llama_model_is_recurrent(model) ||
                        llama_model_is_hybrid(model) ||
                        (llama_model_n_swa(model) > 0 && !params_base.swa_full)
                    );

                    // add prompt tokens for processing in the current batch
                    while (slot.prompt.n_tokens() < slot.task->n_tokens() && batch.n_tokens < n_batch) {
                        // get the next token to process
                        llama_token cur_tok = input_tokens[slot.prompt.n_tokens()];
                        if (cur_tok == LLAMA_TOKEN_NULL) {
                            break; // end of text chunk
                        }

                        // if this is an alora request with pre-invocation tokens that are not
                        // cached, we need to stop filling this batch at those pre-invocation tokens.
                        if (alora_scale > 0 && slot.prompt.n_tokens() == slot.alora_invocation_start - 1) {
                            SLT_DBG(slot, "stop prompt batch filling at (n_tokens = %d, alora_invocation_start = %d)\n", slot.prompt.n_tokens(), slot.alora_invocation_start);
                            break;
                        }

                        // embedding requires all tokens in the batch to be output
                        common_batch_add(batch,
                                cur_tok,
                                slot.prompt.tokens.pos_next(),
                                { slot.id },
                                slot.need_embd());
                        slot.prompt.tokens.push_back(cur_tok);

                        slot.n_prompt_tokens_processed++;

                        // process the last few tokens of the prompt separately in order to allow for a checkpoint to be created.
                        if (do_checkpoint && slot.task->n_tokens() - slot.prompt.n_tokens() == 64) {
                            break;
                        }
                    }

                    // SLT_INF(slot, "new slot.prompt.tokens: %s\n", slot.prompt.tokens.str().c_str());

                    SLT_INF(slot, "prompt processing progress, n_tokens = %d, batch.n_tokens = %d, progress = %f\n", slot.prompt.n_tokens(), batch.n_tokens, (float) slot.prompt.n_tokens() / slot.task->n_tokens());

                    // the entire prompt has been processed
                    if (slot.prompt.n_tokens() == slot.task->n_tokens()) {
                        slot.state = SLOT_STATE_DONE_PROMPT;

                        GGML_ASSERT(batch.n_tokens > 0);

                        common_sampler_reset(slot.smpl.get());

                        // process all prompt tokens through the sampler system
                        for (int i = 0; i < slot.task->n_tokens(); ++i) {
                            llama_token id = input_tokens[i];
                            if (id != LLAMA_TOKEN_NULL) {
                                common_sampler_accept(slot.smpl.get(), id, false);
                            }
                        }

                        // extract the logits only for the last token
                        batch.logits[batch.n_tokens - 1] = true;

                        slot.n_decoded = 0;
                        slot.i_batch   = batch.n_tokens - 1;

                        SLT_INF(slot, "prompt done, n_tokens = %d, batch.n_tokens = %d\n", slot.prompt.n_tokens(), batch.n_tokens);

                        const auto pos_min = llama_memory_seq_pos_min(llama_get_memory(ctx), slot.id);
                        const auto pos_max = llama_memory_seq_pos_max(llama_get_memory(ctx), slot.id);

                        // no need for empty or small checkpoints
                        do_checkpoint = do_checkpoint && (pos_min >= 0 && pos_max >= 64);

                        // no need to create checkpoints that are too close together
                        do_checkpoint = do_checkpoint && (slot.prompt.checkpoints.empty() || pos_max > slot.prompt.checkpoints.back().pos_max + 64);

                        if (do_checkpoint) {
                            while (slot.prompt.checkpoints.size() >= (size_t) params_base.n_ctx_checkpoints) {
                                // make room for the new checkpoint, if needed
                                const auto & cur = slot.prompt.checkpoints.front();

                                SLT_WRN(slot, "erasing old context checkpoint (pos_min = %d, pos_max = %d, size = %.3f MiB)\n",
                                        cur.pos_min, cur.pos_max, (float) cur.data.size() / 1024 / 1024);

                                slot.prompt.checkpoints.erase(slot.prompt.checkpoints.begin());
                            }

                            const size_t checkpoint_size = llama_state_seq_get_size_ext(ctx, slot.id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY);

                            auto & cur = slot.prompt.checkpoints.emplace_back(server_prompt_checkpoint{
                                /*.pos_min = */ pos_min,
                                /*.pos_max = */ pos_max,
                                /*.data    = */ std::vector<uint8_t>(checkpoint_size),
                            });

                            llama_state_seq_get_data_ext(ctx, cur.data.data(), checkpoint_size, slot.id, LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY);

                            SLT_WRN(slot, "created context checkpoint %d of %d (pos_min = %d, pos_max = %d, size = %.3f MiB)\n",
                                    (int) slot.prompt.checkpoints.size(), params_base.n_ctx_checkpoints, cur.pos_min, cur.pos_max, (float) cur.data.size() / 1024 / 1024);
                        }
                    }
                }
                if (!slot_batched) {
                    slot_batched = &slot;
                }

                if (batch.n_tokens >= n_batch) {
                    break;
                }
            }
        }

        if (batch.n_tokens == 0) {
            SRV_WRN("%s", "no tokens to decode\n");
            return;
        }

        SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens);

        if (slot_batched) {
            // apply lora, only need to do it once per batch
            common_set_adapter_lora(ctx, slot_batched->lora);

            // if the lora is temporarily disabled for an alora, re-enable it for next time
            if (alora_scale > 0.0f) {
                SRV_DBG("re-enabling alora with scale %f\n", alora_scale);
                slot_batched->lora[alora_disabled_id].scale = alora_scale;
            }

            llama_set_embeddings(ctx, slot_batched->need_embd());
        }

        int32_t i_next = 0;

        // process the created batch of tokens
        for (int32_t i = 0; i < batch.n_tokens; i = i_next) {
            const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);

            llama_batch batch_view = {
                n_tokens,
                batch.token    + i,
                nullptr,
                batch.pos      + i,
                batch.n_seq_id + i,
                batch.seq_id   + i,
                batch.logits   + i,
            };

            const int ret = llama_decode(ctx, batch_view);

            metrics.on_decoded(slots);

            if (ret != 0) {
                {
                    std::string err;

                    if (n_batch == 1 && ret == 1) {
                        // TODO: try to terminate only the largest active slot/sequence and continue with the rest
                        //       need to remove the tokens from the current batch too
                        err = "Context size has been exceeded.";
                    }

                    if (ret == -1) {
                        err = "Invalid input batch.";
                    }

                    if (ret < -1) {
                        // TODO: update slot state based on llama_memory_seq_pos_min() and llama_memory_seq_pos_max()
                        err = "Compute error.";
                    }

                    // TODO: handle ret == 2 (abort) when we start aborting
                    if (!err.empty()) {
                        SRV_ERR("%s i = %d, n_batch = %d, ret = %d\n", err.c_str(), i, n_batch, ret);

                        for (auto & slot : slots) {
                            if (slot.is_processing()) {
                                send_error(slot, err);
                                slot.release();

                                // note: it's complicated to keep track of how much of the current batch has been
                                //       processed before the error occurred, so we simply clear the entire context
                                clear_slot(slot);
                            }
                        }

                        break;
                    }
                }

                // retry with half the batch size to try to find a free slot in the KV cache
                if (!try_clear_idle_slots()) {
                    n_batch /= 2;
                }

                SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret);

                continue; // continue loop of n_batch
            }
            // move the head of the batch forward with the number of tokens we just processed
            i_next = i + n_tokens;

            // on successful decode, restore the original batch size
            n_batch = llama_n_batch(ctx);

            // technically, measuring the time here excludes the sampling time for the last batch,
            // but on the other hand, we don't want to do too many system calls to measure the time, so it's ok
            const int64_t t_current = ggml_time_us();

            for (auto & slot : slots) {
                // may need to copy state to other slots
                if (slot.state == SLOT_STATE_DONE_PROMPT && slot.is_parent()) {
                    std::vector<server_slot *> child_slots;
                    for (auto & other : slots) {
                        if (other.state == SLOT_STATE_WAIT_OTHER && slot.task->id == other.task->id_parent) {
                            child_slots.push_back(&other);
                        }
                    }

                    // we can only proceed if all child slots have the correct tasks
                    if (child_slots.size() == slot.task->n_children) {
                        // copy state to the child slots
                        for (auto & child : child_slots) {
                            SLT_INF(slot, "copying state to child %d\n", child->id);
                            slot.copy_state_to(*child);
                            child->state = SLOT_STATE_DONE_PROMPT;
                        }
                    }
                }

                // optionally send prompt processing progress
                if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_DONE_PROMPT) {
                    if (slot.task->params.stream && slot.task->params.return_progress) {
                        send_partial_response(slot, {}, true);
                    }
                }

                if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens)) {
                    continue; // continue loop of slots
                }

                if (slot.state == SLOT_STATE_DONE_PROMPT) {
                    if (slot.task->type == SERVER_TASK_TYPE_EMBEDDING) {
                        // prompt evaluated for embedding
                        send_embedding(slot, batch_view);
                        slot.release();
                        slot.i_batch = -1;
                        continue; // continue loop of slots
                    }

                    if (slot.task->type == SERVER_TASK_TYPE_RERANK) {
                        send_rerank(slot, batch_view);
                        slot.release();
                        slot.i_batch = -1;
                        continue; // continue loop of slots
                    }

                    // prompt evaluated for next-token prediction
                    slot.state = SLOT_STATE_GENERATING;
                } else if (slot.state != SLOT_STATE_GENERATING) {
                    continue; // continue loop of slots
                }

                if (slot.i_batch_dft.size() > 0) {
                    continue; // sample using speculative decoding
                }

                const int tok_idx = slot.i_batch - i;

                llama_token id = common_sampler_sample(slot.smpl.get(), ctx, tok_idx);

                slot.i_batch = -1;

                common_sampler_accept(slot.smpl.get(), id, true);

                slot.n_decoded += 1;

                if (slot.n_decoded == 1) {
                    slot.t_start_generation  = t_current;
                    slot.t_prompt_processing = (slot.t_start_generation - slot.t_start_process_prompt) / 1e3;
                    metrics.on_prompt_eval(slot);
                }

                slot.t_token_generation = std::max<int64_t>(1, t_current - slot.t_start_generation) / 1e3;

                completion_token_output result;
                result.tok          = id;
                result.text_to_send = common_token_to_piece(ctx, result.tok, accept_special_token(slot, result.tok));
                result.prob         = 1.0f; // TODO: set it here instead of doing it inside populate_token_probs

                if (slot.task->params.sampling.n_probs > 0) {
                    populate_token_probs(slot, result, slot.task->params.post_sampling_probs, params_base.special, tok_idx);
                }

                if (!process_token(result, slot)) {
                    // release the slot because of the stop condition
                    slot.print_timings();
                    send_final_response(slot);
                    metrics.on_prediction(slot);
                    slot.release();

                    continue;
                }
            }

            // speculative decoding - main model sample and accept
            for (auto & slot : slots) {
                if (slot.state != SLOT_STATE_GENERATING || slot.i_batch_dft.empty()) {
                    continue;
                }

                const size_t n_draft = slot.drafted.size();

                // the tokens accepted from the speculation
                const auto ids = common_sampler_sample_and_accept_n(slot.smpl.get(), ctx, slot.i_batch_dft, slot.drafted);

                slot.i_batch_dft.clear();
                slot.drafted.clear();

                slot.n_decoded += ids.size();
                slot.t_token_generation = std::max<int64_t>(1, t_current - slot.t_start_generation) / 1e3;

                // update how many of the drafted tokens were accepted
                slot.n_draft_accepted += ids.size() - 1;

                // roll back to the state before sampling the draft tokens
                slot.prompt.tokens.keep_first(slot.prompt.n_tokens() - n_draft);

                // add the accepted tokens to the prompt
                slot.prompt.tokens.insert({ids.begin(), ids.end() - 1});
                slot.sampled = ids.back(); // last accepted token

                llama_memory_seq_rm(llama_get_memory(ctx), slot.id, slot.prompt.n_tokens(), -1);

                for (size_t i = 0; i < ids.size(); ++i) {
                    completion_token_output result;
                    result.tok          = ids[i];
                    result.text_to_send = common_token_to_piece(ctx, result.tok, accept_special_token(slot, result.tok));
                    result.prob         = 1.0f; // set later

                    // TODO: set result.probs

                    if (!process_token(result, slot)) {
                        slot.print_timings();
                        send_final_response(slot);
                        metrics.on_prediction(slot);
                        slot.release();
                        break;
                    }
                }

                // note: use n_draft here - slot.drafted has already been cleared above
                SLT_DBG(slot, "accepted %d/%d draft tokens, new n_tokens = %d\n", (int) ids.size() - 1, (int) n_draft, slot.prompt.n_tokens());
            }
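            // example (illustrative accounting, not part of the upstream logic): with a
            // draft of n_draft = 4 tokens, ids holds the verified draft tokens plus one
            // freshly sampled token, so ids.size() == 3 means 2 draft tokens were
            // accepted; the cache is rolled back by the 4 drafted tokens, re-extended
            // with the 2 accepted ones, and ids.back() becomes the next slot.sampled.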
        }

        SRV_DBG("%s", "run slots completed\n");
    }

    json model_meta() const {
        return json {
            {"vocab_type",  llama_vocab_type       (vocab)},
            {"n_vocab",     llama_vocab_n_tokens   (vocab)},
            {"n_ctx_train", llama_model_n_ctx_train(model)},
            {"n_embd",      llama_model_n_embd     (model)},
            {"n_params",    llama_model_n_params   (model)},
            {"size",        llama_model_size       (model)},
        };
    }
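    // example (illustrative values, not produced by a real model): model_meta()
    // returns a JSON object such as
    //   {"vocab_type": 2, "n_vocab": 32000, "n_ctx_train": 4096,
    //    "n_embd": 4096, "n_params": 7241732096, "size": 4108731904}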
    int get_slot_n_ctx() {
        return slots.back().n_ctx;
    }

    server_response_reader get_response_reader() {
        return server_response_reader(queue_tasks, queue_results, HTTP_POLLING_SECONDS);
    }
};

//
// server_context (public API)
//

server_context::server_context() : impl(new server_context_impl()) {}
server_context::~server_context() = default;

void server_context::init() {
    impl->init();
}

bool server_context::load_model(const common_params & params) {
    return impl->load_model(params);
}

void server_context::start_loop() {
    impl->queue_tasks.start_loop();
}

void server_context::terminate() {
    impl->queue_tasks.terminate();
}

llama_context * server_context::get_llama_context() const {
    return impl->ctx;
}

server_response_reader server_context::get_response_reader() {
    return impl->get_response_reader();
}

server_context_info server_context::get_info() const {
    return server_context_info {
        /* build_info    */ build_info,
        /* model_name    */ impl->model_name,
        /* has_inp_image */ impl->oai_parser_opt.allow_image,
        /* has_inp_audio */ impl->oai_parser_opt.allow_audio,
    };
}
// generator-like API for HTTP response generation
struct server_res_generator : server_http_res {
    server_response_reader rd;

    server_res_generator(server_context_impl & ctx_server)
        : rd(ctx_server.queue_tasks, ctx_server.queue_results, HTTP_POLLING_SECONDS) {}

    void ok(const json & response_data) {
        status = 200;
        data   = safe_json_to_str(response_data);
    }

    void error(const json & error_data) {
        status = json_value(error_data, "code", 500);
        data   = safe_json_to_str({{ "error", error_data }});
    }
};
//
// server_routes
//

static std::unique_ptr<server_res_generator> handle_completions_impl(
        server_context_impl & ctx_server,
        server_task_type type,
        const json & data,
        const std::vector<raw_buffer> & files,
        const std::function<bool()> & should_stop,
        task_response_type res_type) {
    GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL);

    auto res = std::make_unique<server_res_generator>(ctx_server);
    auto completion_id = gen_chatcmplid();
    auto & rd = res->rd;

    try {
        std::vector<server_task> tasks;

        const auto & prompt = data.at("prompt");
        // TODO: this log can become very long, put it behind a flag or think about a more compact format
        //SRV_DBG("Prompt: %s\n", prompt.is_string() ? prompt.get<std::string>().c_str() : prompt.dump(2).c_str());

        // process the prompt
        std::vector<server_tokens> inputs;

        if (res_type != TASK_RESPONSE_TYPE_NONE && ctx_server.mctx != nullptr) {
            // this is the case used by the OAI-compatible chat path with MTMD
            // TODO: it can be moved to the path below
            inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
        } else {
            // everything else, including multimodal completions
            inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
        }

        tasks.reserve(inputs.size());
        int idx = 0;
        for (size_t i = 0; i < inputs.size(); i++) {
            server_task task = server_task(type);

            task.id     = ctx_server.queue_tasks.get_new_id();
            task.index  = idx++;
            task.tokens = std::move(inputs[i]);

            task.params = server_task::params_from_json_cmpl(
                    ctx_server.ctx,
                    ctx_server.params_base,
                    data);
            task.id_slot = json_value(data, "id_slot", -1);

            // OAI-compat
            task.params.res_type          = res_type;
            task.params.oaicompat_cmpl_id = completion_id;
            task.params.oaicompat_model   = ctx_server.model_name;

            if (task.params.n_cmpl > 1) {
                task.n_children = task.params.n_cmpl - 1;
                for (size_t j = 0; j < task.n_children; j++) {
                    server_task child = task.create_child(
                            task.id,
                            ctx_server.queue_tasks.get_new_id(),
                            idx++);
                    tasks.push_back(std::move(child));
                }
            }

            tasks.push_back(std::move(task));
        }

        rd.post_tasks(std::move(tasks));
    } catch (const std::exception & e) {
        res->error(format_error_response(e.what(), ERROR_TYPE_INVALID_REQUEST));
        return res;
    }

    bool stream = json_value(data, "stream", false);

    if (!stream) {
        // non-stream, wait for the results
        auto all_results = rd.wait_for_all(should_stop);
        if (all_results.is_terminated) {
            return res; // connection is closed
        } else if (all_results.error) {
            res->error(all_results.error->to_json());
            return res;
        } else {
            json arr = json::array();
            for (auto & res : all_results.results) {
                GGML_ASSERT(dynamic_cast<server_task_result_cmpl_final*>(res.get()) != nullptr);
                arr.push_back(res->to_json());
            }
            GGML_ASSERT(!arr.empty() && "empty results");
            if (arr.size() == 1) {
                // if single request, return a single object instead of an array
                res->ok(arr[0]);
            } else if (res_type == TASK_RESPONSE_TYPE_OAI_CHAT || res_type == TASK_RESPONSE_TYPE_OAI_CMPL) {
                // if there are multiple results in OAI format, we need to re-format them
                json & choices = arr[0]["choices"];
                for (size_t i = 1; i < arr.size(); i++) {
                    choices.push_back(std::move(arr[i]["choices"][0]));
                }
                res->ok(arr[0]);
            } else {
                // multiple results, non-OAI compat
                res->ok(arr);
            }
        }
    } else {
        // in streaming mode, the first error must be treated as a non-stream response
        // this is to match the OAI API behavior
        // ref: https://github.com/ggml-org/llama.cpp/pull/16486#discussion_r2419657309
        server_task_result_ptr first_result = rd.next(should_stop);
        if (first_result == nullptr) {
            return res; // connection is closed
        } else if (first_result->is_error()) {
            res->error(first_result->to_json());
            return res;
        } else {
            GGML_ASSERT(
                dynamic_cast<server_task_result_cmpl_partial*>(first_result.get()) != nullptr
                || dynamic_cast<server_task_result_cmpl_final*>(first_result.get()) != nullptr
            );
        }

        // subsequent responses are streamed; the first result is sent immediately
        json first_result_json = first_result->to_json();
        if (res_type == TASK_RESPONSE_TYPE_ANTHROPIC) {
            res->data = format_anthropic_sse(first_result_json);
        } else {
            res->data = format_oai_sse(first_result_json);
        }
        res->status = 200;
        res->content_type = "text/event-stream";

        res->next = [res_this = res.get(), res_type, &should_stop](std::string & output) -> bool {
            static auto format_error = [](task_response_type res_type, const json & res_json) {
                if (res_type == TASK_RESPONSE_TYPE_ANTHROPIC) {
                    return format_anthropic_sse({
                        {"event", "error"},
                        {"data", res_json},
                    });
                } else {
                    return format_oai_sse(json {{ "error", res_json }});
                }
            };

            try {
                if (should_stop()) {
                    SRV_DBG("%s", "stopping streaming due to should_stop condition\n");
                    return false; // should_stop condition met
                }

                if (!res_this->data.empty()) {
                    // flush the first chunk
                    output = std::move(res_this->data);
                    res_this->data.clear();
                    return true;
                }

                server_response_reader & rd = res_this->rd;

                // check if there is more data
                if (!rd.has_next()) {
                    if (res_type == TASK_RESPONSE_TYPE_ANTHROPIC) {
                        // Anthropic doesn't send [DONE], message_stop was already sent
                        output = "";
                    } else if (res_type != TASK_RESPONSE_TYPE_NONE) {
                        output = "data: [DONE]\n\n";
                    } else {
                        output = "";
                    }
                    SRV_DBG("%s", "all results received, terminating stream\n");
                    return false; // no more data, terminate
                }

                // receive subsequent results
                auto result = rd.next(should_stop);
                if (result == nullptr) {
                    SRV_DBG("%s", "stopping streaming due to should_stop condition\n");
                    return false; // should_stop condition met
                }

                // send the results
                if (result->is_error()) {
                    json res_json = result->to_json();
                    output = format_error(res_type, res_json);
                    SRV_DBG("%s", "error received during streaming, terminating stream\n");
                    return false; // terminate on error
                } else {
                    GGML_ASSERT(
                        dynamic_cast<server_task_result_cmpl_partial*>(result.get()) != nullptr
                        || dynamic_cast<server_task_result_cmpl_final*>(result.get()) != nullptr
                    );
                    json res_json = result->to_json();
                    if (res_type == TASK_RESPONSE_TYPE_ANTHROPIC) {
                        output = format_anthropic_sse(res_json);
                    } else {
                        output = format_oai_sse(res_json);
                    }
                }

                // has next data, continue
                return true;
            } catch (const std::exception & e) {
                json error_json = format_error_response(e.what(), ERROR_TYPE_SERVER);
                output = format_error(res_type, error_json);
                // terminate on exception
                return false;
            }
        };
    }

    return res;
}
void server_routes::init_routes() {
    this->get_health = [this](const server_http_req &) {
        // error and loading states are handled by middleware
        auto res = std::make_unique<server_res_generator>(ctx_server);
        res->ok({{"status", "ok"}});
        return res;
    };
    this->get_metrics = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        if (!params.endpoint_metrics) {
            res->error(format_error_response("This server does not support metrics endpoint. Start it with `--metrics`", ERROR_TYPE_NOT_SUPPORTED));
            return res;
        }

        // request slots data using task queue
        // TODO: use server_response_reader
        int task_id = ctx_server.queue_tasks.get_new_id();
        {
            server_task task(SERVER_TASK_TYPE_METRICS);
            task.id = task_id;
            ctx_server.queue_results.add_waiting_task_id(task_id);
            ctx_server.queue_tasks.post(std::move(task), true); // high-priority task
        }

        // get the result
        server_task_result_ptr result = ctx_server.queue_results.recv(task_id);
        ctx_server.queue_results.remove_waiting_task_id(task_id);

        if (result->is_error()) {
            res->error(result->to_json());
            return res;
        }

        // TODO: get rid of this dynamic_cast
        auto res_task = dynamic_cast<server_task_result_metrics*>(result.get());
        GGML_ASSERT(res_task != nullptr);

        // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
        json all_metrics_def = json {
            {"counter", {{
                {"name",  "prompt_tokens_total"},
                {"help",  "Number of prompt tokens processed."},
                {"value", (uint64_t) res_task->n_prompt_tokens_processed_total}
            }, {
                {"name",  "prompt_seconds_total"},
                {"help",  "Prompt processing time, in seconds."},
                {"value", (uint64_t) res_task->t_prompt_processing_total / 1.e3}
            }, {
                {"name",  "tokens_predicted_total"},
                {"help",  "Number of generation tokens processed."},
                {"value", (uint64_t) res_task->n_tokens_predicted_total}
            }, {
                {"name",  "tokens_predicted_seconds_total"},
                {"help",  "Token prediction time, in seconds."},
                {"value", (uint64_t) res_task->t_tokens_generation_total / 1.e3}
            }, {
                {"name",  "n_decode_total"},
                {"help",  "Total number of llama_decode() calls"},
                {"value", res_task->n_decode_total}
            }, {
                {"name",  "n_tokens_max"},
                {"help",  "Largest observed n_tokens."},
                {"value", res_task->n_tokens_max}
            }, {
                {"name",  "n_busy_slots_per_decode"},
                {"help",  "Average number of busy slots per llama_decode() call"},
                {"value", (float) res_task->n_busy_slots_total / std::max((float) res_task->n_decode_total, 1.f)}
            }}},
            {"gauge", {{
                {"name",  "prompt_tokens_seconds"},
                {"help",  "Average prompt throughput in tokens/s."},
                {"value", res_task->n_prompt_tokens_processed ? 1.e3 / res_task->t_prompt_processing * res_task->n_prompt_tokens_processed : 0.}
            },{
                {"name",  "predicted_tokens_seconds"},
                {"help",  "Average generation throughput in tokens/s."},
                {"value", res_task->n_tokens_predicted ? 1.e3 / res_task->t_tokens_generation * res_task->n_tokens_predicted : 0.}
            },{
                {"name",  "requests_processing"},
                {"help",  "Number of requests processing."},
                {"value", (uint64_t) res_task->n_processing_slots}
            },{
                {"name",  "requests_deferred"},
                {"help",  "Number of requests deferred."},
                {"value", (uint64_t) res_task->n_tasks_deferred}
            }}}
        };

        std::stringstream prometheus;

        for (const auto & el : all_metrics_def.items()) {
            const auto & type        = el.key();
            const auto & metrics_def = el.value();

            for (const auto & metric_def : metrics_def) {
                const std::string name = metric_def.at("name");
                const std::string help = metric_def.at("help");

                auto value = json_value(metric_def, "value", 0.);
                prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
                           << "# TYPE llamacpp:" << name << " " << type << "\n"
                           << "llamacpp:"        << name << " " << value << "\n";
            }
        }

        res->headers["Process-Start-Time-Unix"] = std::to_string(res_task->t_start);
        res->content_type = "text/plain; version=0.0.4";
        res->status       = 200;
        res->data         = prometheus.str();
        return res;
    };
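    // For illustration, the exposition text built above renders each metric as
    // (example value, not a real measurement):
    //   # HELP llamacpp:prompt_tokens_total Number of prompt tokens processed.
    //   # TYPE llamacpp:prompt_tokens_total counter
    //   llamacpp:prompt_tokens_total 1024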
    this->get_slots = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        if (!params.endpoint_slots) {
            res->error(format_error_response("This server does not support slots endpoint. Start it with `--slots`", ERROR_TYPE_NOT_SUPPORTED));
            return res;
        }

        // request slots data using task queue
        int task_id = ctx_server.queue_tasks.get_new_id();
        {
            server_task task(SERVER_TASK_TYPE_METRICS);
            task.id = task_id;
            ctx_server.queue_results.add_waiting_task_id(task_id);
            ctx_server.queue_tasks.post(std::move(task), true); // high-priority task
        }

        // get the result
        server_task_result_ptr result = ctx_server.queue_results.recv(task_id);
        ctx_server.queue_results.remove_waiting_task_id(task_id);

        if (result->is_error()) {
            res->error(result->to_json());
            return res;
        }

        // TODO: get rid of this dynamic_cast
        auto res_task = dynamic_cast<server_task_result_metrics*>(result.get());
        GGML_ASSERT(res_task != nullptr);

        // optionally return "fail_on_no_slot" error
        if (!req.get_param("fail_on_no_slot").empty()) {
            if (res_task->n_idle_slots == 0) {
                res->error(format_error_response("no slot available", ERROR_TYPE_UNAVAILABLE));
                return res;
            }
        }

        res->ok(res_task->slots_data);
        return res;
    };
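    // For illustration (assuming this handler is bound to GET /slots), a client can
    // probe availability with:
    //   GET /slots?fail_on_no_slot=1
    // which yields an ERROR_TYPE_UNAVAILABLE response when no slot is idle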
    this->post_slots = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        if (params.slot_save_path.empty()) {
            res->error(format_error_response("This server does not support slots action. Start it with `--slot-save-path`", ERROR_TYPE_NOT_SUPPORTED));
            return res;
        }

        std::string id_slot_str = req.get_param("id_slot");
        int id_slot;

        try {
            id_slot = std::stoi(id_slot_str);
        } catch (const std::exception &) {
            res->error(format_error_response("Invalid slot ID", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }

        std::string action = req.get_param("action");
        if (action == "save") {
            return handle_slots_save(req, id_slot);
        } else if (action == "restore") {
            return handle_slots_restore(req, id_slot);
        } else if (action == "erase") {
            return handle_slots_erase(req, id_slot);
        } else {
            res->error(format_error_response("Invalid action", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }
    };
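    // For illustration only -- the exact URL shape depends on how the HTTP layer
    // binds this handler and its "id_slot"/"action" params:
    //   POST /slots/0?action=save    with body {"filename": "state.bin"}
    //   POST /slots/0?action=restore with body {"filename": "state.bin"}
    //   POST /slots/0?action=erase
    // the filename is resolved relative to --slot-save-path (see the handlers below)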
    this->get_props = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        json default_generation_settings_for_props;
        {
            task_params params;
            params.sampling = ctx_server.params_base.sampling;
            default_generation_settings_for_props = json {
                {"params", params.to_json(true)},
                {"n_ctx",  ctx_server.get_slot_n_ctx()},
            };
        }

        // this endpoint is publicly available, please only return what is safe to be exposed
        json data = {
            { "default_generation_settings", default_generation_settings_for_props },
            { "total_slots",                 ctx_server.params_base.n_parallel },
            { "model_alias",                 ctx_server.model_name },
            { "model_path",                  ctx_server.params_base.model.path },
            { "modalities", json {
                {"vision", ctx_server.oai_parser_opt.allow_image},
                {"audio",  ctx_server.oai_parser_opt.allow_audio},
            } },
            { "endpoint_slots",   params.endpoint_slots },
            { "endpoint_props",   params.endpoint_props },
            { "endpoint_metrics", params.endpoint_metrics },
            { "webui",            params.webui },
            { "chat_template",    common_chat_templates_source(ctx_server.chat_templates.get()) },
            { "bos_token",        common_token_to_piece(ctx_server.ctx, llama_vocab_bos(ctx_server.vocab), /* special= */ true)},
            { "eos_token",        common_token_to_piece(ctx_server.ctx, llama_vocab_eos(ctx_server.vocab), /* special= */ true)},
            { "build_info",       build_info },
        };
        if (ctx_server.params_base.use_jinja) {
            if (auto tool_use_src = common_chat_templates_source(ctx_server.chat_templates.get(), "tool_use")) {
                data["chat_template_tool_use"] = tool_use_src;
            }
        }

        res->ok(data);
        return res;
    };
    this->post_props = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        if (!params.endpoint_props) {
            res->error(format_error_response("This server does not support changing global properties. Start it with `--props`", ERROR_TYPE_NOT_SUPPORTED));
            return res;
        }
        // update any props here
        res->ok({{ "success", true }});
        return res;
    };
    this->get_api_show = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        bool has_mtmd = ctx_server.mctx != nullptr;
        json data = {
            {
                "template", common_chat_templates_source(ctx_server.chat_templates.get()),
            },
            {
                "model_info", {
                    { "llama.context_length", ctx_server.get_slot_n_ctx() },
                }
            },
            {"modelfile",  ""},
            {"parameters", ""},
            {"template",   common_chat_templates_source(ctx_server.chat_templates.get())},
            {"details", {
                {"parent_model",       ""},
                {"format",             "gguf"},
                {"family",             ""},
                {"families",           {""}},
                {"parameter_size",     ""},
                {"quantization_level", ""}
            }},
            {"model_info",   ""},
            {"capabilities", has_mtmd ? json({"completion","multimodal"}) : json({"completion"})}
        };

        res->ok(data);
        return res;
    };
    this->post_infill = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        // check model compatibility
        std::string err;
        if (llama_vocab_fim_pre(ctx_server.vocab) == LLAMA_TOKEN_NULL) {
            err += "prefix token is missing. ";
        }
        if (llama_vocab_fim_suf(ctx_server.vocab) == LLAMA_TOKEN_NULL) {
            err += "suffix token is missing. ";
        }
        if (llama_vocab_fim_mid(ctx_server.vocab) == LLAMA_TOKEN_NULL) {
            err += "middle token is missing. ";
        }
        if (!err.empty()) {
            res->error(format_error_response(string_format("Infill is not supported by this model: %s", err.c_str()), ERROR_TYPE_NOT_SUPPORTED));
            return res;
        }

        // validate input
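        // for illustration, a typical request body (field names match the checks
        // below; values are made up):
        //   {
        //     "input_prefix": "def add(a, b):\n",
        //     "input_suffix": "\n    return c\n",
        //     "prompt":       "",                                        // optional
        //     "input_extra":  [{"filename": "util.py", "text": "..."}]  // optional
        //   }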
        json data = json::parse(req.body);
        if (data.contains("prompt") && !data.at("prompt").is_string()) {
            // prompt is optional, but if present it must be a string
            res->error(format_error_response("\"prompt\" must be a string", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }
        if (!data.contains("input_prefix")) {
            res->error(format_error_response("\"input_prefix\" is required", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }
        if (!data.contains("input_suffix")) {
            res->error(format_error_response("\"input_suffix\" is required", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }
        if (data.contains("input_extra") && !data.at("input_extra").is_array()) {
            // input_extra is optional, but if present it must be an array
            res->error(format_error_response("\"input_extra\" must be an array of {\"filename\": string, \"text\": string}", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }

        json input_extra = json_value(data, "input_extra", json::array());
        for (const auto & chunk : input_extra) {
            // { "text": string, "filename": string }
            if (!chunk.contains("text") || !chunk.at("text").is_string()) {
                res->error(format_error_response("extra_context chunk must contain a \"text\" field with a string value", ERROR_TYPE_INVALID_REQUEST));
                return res;
            }
            // filename is optional
            if (chunk.contains("filename") && !chunk.at("filename").is_string()) {
                res->error(format_error_response("extra_context chunk's \"filename\" field must be a string", ERROR_TYPE_INVALID_REQUEST));
                return res;
            }
        }
        data["input_extra"] = input_extra; // default to an empty array if the field does not exist

        std::string prompt = json_value(data, "prompt", std::string());
        std::vector<server_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, false, true);
        SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
        data["prompt"] = format_prompt_infill(
            ctx_server.vocab,
            data.at("input_prefix"),
            data.at("input_suffix"),
            data.at("input_extra"),
            ctx_server.params_base.n_batch,
            ctx_server.params_base.n_predict,
            ctx_server.get_slot_n_ctx(),
            ctx_server.params_base.spm_infill,
            tokenized_prompts[0].get_text_tokens() // TODO: this could maybe be multimodal
        );

        std::vector<raw_buffer> files; // dummy
        return handle_completions_impl(
            ctx_server,
            SERVER_TASK_TYPE_INFILL,
            data,
            files,
            req.should_stop,
            TASK_RESPONSE_TYPE_NONE); // infill is not OAI compatible
    };
    this->post_completions = [this](const server_http_req & req) {
        std::vector<raw_buffer> files; // dummy
        const json body = json::parse(req.body);
        return handle_completions_impl(
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body,
            files,
            req.should_stop,
            TASK_RESPONSE_TYPE_NONE);
    };

    this->post_completions_oai = [this](const server_http_req & req) {
        std::vector<raw_buffer> files; // dummy
        const json body = json::parse(req.body);
        return handle_completions_impl(
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body,
            files,
            req.should_stop,
            TASK_RESPONSE_TYPE_OAI_CMPL);
    };
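    // For illustration, the chat handler below accepts an OAI-style body such as
    // (message content and flags are made-up examples):
    //   {"messages": [{"role": "user", "content": "Hello"}], "stream": true}
    // oaicompat_chat_params_parse() converts it into the internal completion format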
    this->post_chat_completions = [this](const server_http_req & req) {
        std::vector<raw_buffer> files;
        json body = json::parse(req.body);
        json body_parsed = oaicompat_chat_params_parse(
            body,
            ctx_server.oai_parser_opt,
            files);
        return handle_completions_impl(
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body_parsed,
            files,
            req.should_stop,
            TASK_RESPONSE_TYPE_OAI_CHAT);
    };
    this->post_anthropic_messages = [this](const server_http_req & req) {
        std::vector<raw_buffer> files;
        json body = convert_anthropic_to_oai(json::parse(req.body));
        json body_parsed = oaicompat_chat_params_parse(
            body,
            ctx_server.oai_parser_opt,
            files);
        return handle_completions_impl(
            ctx_server,
            SERVER_TASK_TYPE_COMPLETION,
            body_parsed,
            files,
            req.should_stop,
            TASK_RESPONSE_TYPE_ANTHROPIC);
    };
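    // For illustration, the count_tokens handler below accepts the same
    // Anthropic-style message format as the handler above, but returns only the
    // prompt token count, e.g. (made-up values):
    //   {"model": "...", "messages": [{"role": "user", "content": "Hi"}]}
    //   -> {"input_tokens": 42}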
    this->post_anthropic_count_tokens = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        std::vector<raw_buffer> files;
        json body = convert_anthropic_to_oai(json::parse(req.body));
        json body_parsed = oaicompat_chat_params_parse(
            body,
            ctx_server.oai_parser_opt,
            files);
        json prompt = body_parsed.at("prompt");
        llama_tokens tokens = tokenize_mixed(ctx_server.vocab, prompt, true, true);
        res->ok({{"input_tokens", static_cast<int>(tokens.size())}});
        return res;
    };
    // same as handle_chat_completions, but without the inference part
    this->post_apply_template = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        std::vector<raw_buffer> files; // dummy, unused
        json body = json::parse(req.body);
        json data = oaicompat_chat_params_parse(
            body,
            ctx_server.oai_parser_opt,
            files);
        res->ok({{ "prompt", std::move(data.at("prompt")) }});
        return res;
    };
    this->get_models = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        json model_meta = nullptr;
        if (is_ready()) {
            model_meta = ctx_server.model_meta();
        }
        bool has_mtmd = ctx_server.mctx != nullptr;
        json models = {
            {"models", {
                {
                    {"name",         ctx_server.model_name},
                    {"model",        ctx_server.model_name},
                    {"modified_at",  ""},
                    {"size",         ""},
                    {"digest",       ""}, // dummy value, llama.cpp does not manage model file hashes
                    {"type",         "model"},
                    {"description",  ""},
                    {"tags",         {""}},
                    {"capabilities", has_mtmd ? json({"completion","multimodal"}) : json({"completion"})},
                    {"parameters",   ""},
                    {"details", {
                        {"parent_model",       ""},
                        {"format",             "gguf"},
                        {"family",             ""},
                        {"families",           {""}},
                        {"parameter_size",     ""},
                        {"quantization_level", ""}
                    }}
                }
            }},
            {"object", "list"},
            {"data", {
                {
                    {"id",       ctx_server.model_name},
                    {"object",   "model"},
                    {"created",  std::time(0)},
                    {"owned_by", "llamacpp"},
                    {"meta",     model_meta},
                },
            }}
        };

        res->ok(models);
        return res;
    };
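    // For illustration, the tokenize handler below supports piece-level output
    // (the token id is made up and depends on the loaded vocabulary):
    //   {"content": "Hello", "with_pieces": true}
    //   -> {"tokens": [{"id": 9906, "piece": "Hello"}]}
    // pieces that are not valid UTF-8 are returned as arrays of byte values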
    this->post_tokenize = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        const json body = json::parse(req.body);
        json tokens_response = json::array();
        if (body.count("content") != 0) {
            const bool add_special   = json_value(body, "add_special", false);
            const bool parse_special = json_value(body, "parse_special", true);
            const bool with_pieces   = json_value(body, "with_pieces", false);

            llama_tokens tokens = tokenize_mixed(ctx_server.vocab, body.at("content"), add_special, parse_special);

            if (with_pieces) {
                for (const auto & token : tokens) {
                    std::string piece = common_token_to_piece(ctx_server.ctx, token);
                    json piece_json;

                    // check if the piece is valid UTF-8
                    if (is_valid_utf8(piece)) {
                        piece_json = piece;
                    } else {
                        // if not valid UTF-8, store as an array of byte values
                        piece_json = json::array();
                        for (unsigned char c : piece) {
                            piece_json.push_back(static_cast<int>(c));
                        }
                    }

                    tokens_response.push_back({
                        {"id",    token},
                        {"piece", piece_json}
                    });
                }
            } else {
                tokens_response = tokens;
            }
        }

        res->ok(json{{"tokens", std::move(tokens_response)}});
        return res;
    };
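    // For illustration, detokenization below is the inverse mapping
    // (again with a made-up, vocabulary-dependent id):
    //   {"tokens": [9906]} -> {"content": "Hello"}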
    this->post_detokenize = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        const json body = json::parse(req.body);

        std::string content;
        if (body.count("tokens") != 0) {
            const llama_tokens tokens = body.at("tokens");
            content = tokens_to_str(ctx_server.ctx, tokens);
        }

        res->ok(json{{"content", std::move(content)}});
        return res;
    };
    this->post_embeddings = [this](const server_http_req & req) {
        return handle_embeddings_impl(req, TASK_RESPONSE_TYPE_NONE);
    };

    this->post_embeddings_oai = [this](const server_http_req & req) {
        return handle_embeddings_impl(req, TASK_RESPONSE_TYPE_OAI_EMBD);
    };

    this->post_rerank = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        if (!ctx_server.params_base.embedding || ctx_server.params_base.pooling_type != LLAMA_POOLING_TYPE_RANK) {
            res->error(format_error_response("This server does not support reranking. Start it with `--reranking`", ERROR_TYPE_NOT_SUPPORTED));
            return res;
        }

        const json body = json::parse(req.body);

        // if true, use TEI API format, otherwise use Jina API format
        // Jina: https://jina.ai/reranker/
        // TEI:  https://huggingface.github.io/text-embeddings-inference/#/Text%20Embeddings%20Inference/rerank
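        // for illustration, the two accepted request shapes (values made up):
        //   Jina: {"query": "what is panda?", "documents": ["...", "..."], "top_n": 2}
        //   TEI:  {"query": "what is panda?", "texts":     ["...", "..."]}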
        bool is_tei_format = body.contains("texts");

        json query;
        if (body.count("query") == 1) {
            query = body.at("query");
            if (!query.is_string()) {
                res->error(format_error_response("\"query\" must be a string", ERROR_TYPE_INVALID_REQUEST));
                return res;
            }
        } else {
            res->error(format_error_response("\"query\" must be provided", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }

        std::vector<std::string> documents = json_value(body, "documents",
                                             json_value(body, "texts", std::vector<std::string>()));
        if (documents.empty()) {
            res->error(format_error_response("\"documents\" must be a non-empty string array", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }

        int top_n = json_value(body, "top_n", (int) documents.size());

        // create and queue the task
        json responses = json::array();
        server_response_reader rd = ctx_server.get_response_reader();
        {
            std::vector<server_task> tasks;
            tasks.reserve(documents.size());
            for (size_t i = 0; i < documents.size(); i++) {
                auto tmp = format_prompt_rerank(ctx_server.model, ctx_server.vocab, ctx_server.mctx, query, documents[i]);
                server_task task = server_task(SERVER_TASK_TYPE_RERANK);
                task.id     = ctx_server.queue_tasks.get_new_id();
                task.index  = i;
                task.tokens = std::move(tmp);
                tasks.push_back(std::move(task));
            }
            rd.post_tasks(std::move(tasks));
        }

        // wait for the results
        auto all_results = rd.wait_for_all(req.should_stop);

        // collect results
        if (all_results.is_terminated) {
            return res; // connection is closed
        } else if (all_results.error) {
            res->error(all_results.error->to_json());
            return res;
        } else {
            for (auto & res : all_results.results) {
                GGML_ASSERT(dynamic_cast<server_task_result_rerank*>(res.get()) != nullptr);
                responses.push_back(res->to_json());
            }
        }

        // write JSON response
        json root = format_response_rerank(
            body,
            ctx_server.model_name,
            responses,
            is_tei_format,
            documents,
            top_n);

        res->ok(root);
        return res;
    };
    this->get_lora_adapters = [this](const server_http_req &) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        json result = json::array();
        const auto & loras = ctx_server.params_base.lora_adapters;
        for (size_t i = 0; i < loras.size(); ++i) {
            auto & lora = loras[i];
            json entry = {
                {"id",            i},
                {"path",          lora.path},
                {"scale",         lora.scale},
                {"task_name",     lora.task_name},
                {"prompt_prefix", lora.prompt_prefix},
            };
            std::string alora_invocation_string = "";
            const uint64_t n_alora_tokens = llama_adapter_get_alora_n_invocation_tokens(lora.ptr);
            std::vector<llama_token> alora_invocation_tokens;
            if (n_alora_tokens) {
                const llama_token * alora_tokens = llama_adapter_get_alora_invocation_tokens(lora.ptr);
                for (uint64_t i = 0; i < n_alora_tokens; ++i) {
                    alora_invocation_string += common_token_to_piece(ctx_server.ctx, alora_tokens[i]);
                    alora_invocation_tokens.push_back(alora_tokens[i]);
                }
                entry["alora_invocation_string"] = alora_invocation_string;
                entry["alora_invocation_tokens"] = alora_invocation_tokens;
            }
            result.push_back(std::move(entry));
        }
        res->ok(result);
        return res;
    };
    this->post_lora_adapters = [this](const server_http_req & req) {
        auto res = std::make_unique<server_res_generator>(ctx_server);
        const json body = json::parse(req.body);
        if (!body.is_array()) {
            res->error(format_error_response("Request body must be an array", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }

        int task_id = ctx_server.queue_tasks.get_new_id();
        {
            server_task task(SERVER_TASK_TYPE_SET_LORA);
            task.id = task_id;
            task.set_lora = parse_lora_request(ctx_server.params_base.lora_adapters, body);
            ctx_server.queue_results.add_waiting_task_id(task_id);
            ctx_server.queue_tasks.post(std::move(task));
        }

        // get the result
        server_task_result_ptr result = ctx_server.queue_results.recv(task_id);
        ctx_server.queue_results.remove_waiting_task_id(task_id);

        if (result->is_error()) {
            res->error(result->to_json());
            return res;
        }

        GGML_ASSERT(dynamic_cast<server_task_result_apply_lora*>(result.get()) != nullptr);
        res->ok(result->to_json());
        return res;
    };
}
std::unique_ptr<server_res_generator> server_routes::handle_slots_save(const server_http_req & req, int id_slot) {
    auto res = std::make_unique<server_res_generator>(ctx_server);
    const json request_data = json::parse(req.body);
    std::string filename = request_data.at("filename");
    if (!fs_validate_filename(filename)) {
        res->error(format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
        return res;
    }
    std::string filepath = params.slot_save_path + filename;

    int task_id = ctx_server.queue_tasks.get_new_id();
    {
        server_task task(SERVER_TASK_TYPE_SLOT_SAVE);
        task.id = task_id;
        task.slot_action.slot_id  = id_slot;
        task.slot_action.filename = filename;
        task.slot_action.filepath = filepath;
        // TODO: use server_response_reader
        ctx_server.queue_results.add_waiting_task_id(task_id);
        ctx_server.queue_tasks.post(std::move(task));
    }

    server_task_result_ptr result = ctx_server.queue_results.recv(task_id);
    ctx_server.queue_results.remove_waiting_task_id(task_id);

    if (result->is_error()) {
        res->error(result->to_json());
        return res;
    }

    res->ok(result->to_json());
    return res;
}
std::unique_ptr<server_res_generator> server_routes::handle_slots_restore(const server_http_req & req, int id_slot) {
    auto res = std::make_unique<server_res_generator>(ctx_server);
    const json request_data = json::parse(req.body);
    std::string filename = request_data.at("filename");
    if (!fs_validate_filename(filename)) {
        res->error(format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
        return res;
    }
    std::string filepath = params.slot_save_path + filename;

    int task_id = ctx_server.queue_tasks.get_new_id();
    {
        server_task task(SERVER_TASK_TYPE_SLOT_RESTORE);
        task.id = task_id;
        task.slot_action.slot_id  = id_slot;
        task.slot_action.filename = filename;
        task.slot_action.filepath = filepath;
        // TODO: use server_response_reader
        ctx_server.queue_results.add_waiting_task_id(task_id);
        ctx_server.queue_tasks.post(std::move(task));
    }

    server_task_result_ptr result = ctx_server.queue_results.recv(task_id);
    ctx_server.queue_results.remove_waiting_task_id(task_id);

    if (result->is_error()) {
        res->error(result->to_json());
        return res;
    }

    GGML_ASSERT(dynamic_cast<server_task_result_slot_save_load*>(result.get()) != nullptr);
    res->ok(result->to_json());
    return res;
}
std::unique_ptr<server_res_generator> server_routes::handle_slots_erase(const server_http_req &, int id_slot) {
    auto res = std::make_unique<server_res_generator>(ctx_server);
    int task_id = ctx_server.queue_tasks.get_new_id();
    {
        server_task task(SERVER_TASK_TYPE_SLOT_ERASE);
        task.id = task_id;
        task.slot_action.slot_id = id_slot;
        // TODO: use server_response_reader
        ctx_server.queue_results.add_waiting_task_id(task_id);
        ctx_server.queue_tasks.post(std::move(task));
    }

    server_task_result_ptr result = ctx_server.queue_results.recv(task_id);
    ctx_server.queue_results.remove_waiting_task_id(task_id);

    if (result->is_error()) {
        res->error(result->to_json());
        return res;
    }

    GGML_ASSERT(dynamic_cast<server_task_result_slot_erase*>(result.get()) != nullptr);
    res->ok(result->to_json());
    return res;
}
std::unique_ptr<server_res_generator> server_routes::handle_embeddings_impl(const server_http_req & req, task_response_type res_type) {
    auto res = std::make_unique<server_res_generator>(ctx_server);
    if (!ctx_server.params_base.embedding) {
        res->error(format_error_response("This server does not support embeddings. Start it with `--embeddings`", ERROR_TYPE_NOT_SUPPORTED));
        return res;
    }

    if (res_type != TASK_RESPONSE_TYPE_NONE && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) {
        res->error(format_error_response("Pooling type 'none' is not OAI compatible. Please use a different pooling type", ERROR_TYPE_INVALID_REQUEST));
        return res;
    }
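    // For illustration, an OAI-style request body handled below (values made up):
    //   {"input": ["hello", "world"], "encoding_format": "base64"}
    // the non-OAI variant uses a "content" field instead of "input"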
    const json body = json::parse(req.body);

    // for the shape of input/content, see tokenize_input_prompts()
    json prompt;
    if (body.count("input") != 0) {
        prompt = body.at("input");
    } else if (body.contains("content")) {
        res_type = TASK_RESPONSE_TYPE_NONE; // the "content" field is not OAI compatible
        prompt = body.at("content");
    } else {
        res->error(format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
        return res;
    }

    bool use_base64 = false;
    if (body.count("encoding_format") != 0) {
        const std::string & format = body.at("encoding_format");
        if (format == "base64") {
            use_base64 = true;
        } else if (format != "float") {
            res->error(format_error_response("\"encoding_format\" must be either \"float\" or \"base64\"", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }
    }

    auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
    for (const auto & tokens : tokenized_prompts) {
        // this check is necessary for models that do not add a BOS token to the input
        if (tokens.empty()) {
            res->error(format_error_response("Input content cannot be empty", ERROR_TYPE_INVALID_REQUEST));
            return res;
        }
    }

    int embd_normalize = 2; // default to Euclidean/L2 norm
    if (body.count("embd_normalize") != 0) {
        embd_normalize = body.at("embd_normalize");
        if (llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) {
            SRV_DBG("embd_normalize is not supported by pooling type %d, ignoring it\n", llama_pooling_type(ctx_server.ctx));
        }
    }
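    // for intuition: embd_normalize = 2 (L2/Euclidean) rescales an embedding v as
    //   v_norm[i] = v[i] / sqrt(sum_j v[j]^2)
    // so that downstream cosine similarity reduces to a plain dot product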
    // create and queue the task
    json responses = json::array();
    server_response_reader rd = ctx_server.get_response_reader();
    {
        std::vector<server_task> tasks;
        for (size_t i = 0; i < tokenized_prompts.size(); i++) {
            server_task task = server_task(SERVER_TASK_TYPE_EMBEDDING);

            task.id     = ctx_server.queue_tasks.get_new_id();
            task.index  = i;
            task.tokens = std::move(tokenized_prompts[i]);

            // OAI-compat
            task.params.res_type       = res_type;
            task.params.embd_normalize = embd_normalize;
            tasks.push_back(std::move(task));
        }
        rd.post_tasks(std::move(tasks));
    }

    // wait for the results
    auto all_results = rd.wait_for_all(req.should_stop);

    // collect results
    if (all_results.is_terminated) {
        return res; // connection is closed
    } else if (all_results.error) {
        res->error(all_results.error->to_json());
        return res;
    } else {
        for (auto & res : all_results.results) {
            GGML_ASSERT(dynamic_cast<server_task_result_embd*>(res.get()) != nullptr);
            responses.push_back(res->to_json());
        }
    }

    // write JSON response
    json root = res_type == TASK_RESPONSE_TYPE_OAI_EMBD
        ? format_embeddings_response_oaicompat(body, ctx_server.model_name, responses, use_base64)
        : json(responses);

    res->ok(root);
    return res;
}