server.cpp

#include "utils.hpp"

#include "arg.h"
#include "common.h"
#include "log.h"
#include "sampling.h"
#include "json-schema-to-grammar.h"
#include "llama.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

// mime type for sending response
#define MIMETYPE_JSON "application/json; charset=utf-8"

// auto generated files (update with ./deps.sh)
#include "colorthemes.css.hpp"
#include "style.css.hpp"
#include "theme-beeninorder.css.hpp"
#include "theme-ketivah.css.hpp"
#include "theme-mangotango.css.hpp"
#include "theme-playground.css.hpp"
#include "theme-polarnight.css.hpp"
#include "theme-snowstorm.css.hpp"
#include "index.html.hpp"
#include "index-new.html.hpp"
#include "index.js.hpp"
#include "completion.js.hpp"
#include "system-prompts.js.hpp"
#include "prompt-formats.js.hpp"
#include "json-schema-to-grammar.mjs.hpp"
#include "loading.html.hpp"

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cinttypes>
#include <deque>
#include <memory>
#include <mutex>
#include <signal.h>
#include <thread>
#include <unordered_map>
#include <unordered_set>

using json = nlohmann::ordered_json;

enum stop_type {
    STOP_TYPE_FULL,
    STOP_TYPE_PARTIAL,
};

// state diagram: https://github.com/ggerganov/llama.cpp/pull/9283
enum slot_state {
    SLOT_STATE_IDLE,
    SLOT_STATE_STARTED, // TODO: this state is only used for setting up the initial prompt processing; maybe merge it with launch_slot_with_task in the future
    SLOT_STATE_PROCESSING_PROMPT,
    SLOT_STATE_DONE_PROMPT,
    SLOT_STATE_GENERATING,
};

enum server_state {
    SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
    SERVER_STATE_READY,         // Server is ready and model is loaded
};

enum server_task_type {
    SERVER_TASK_TYPE_INFERENCE,
    SERVER_TASK_TYPE_CANCEL,
    SERVER_TASK_TYPE_NEXT_RESPONSE,
    SERVER_TASK_TYPE_METRICS,
    SERVER_TASK_TYPE_SLOT_SAVE,
    SERVER_TASK_TYPE_SLOT_RESTORE,
    SERVER_TASK_TYPE_SLOT_ERASE,
    SERVER_TASK_TYPE_SET_LORA,
};

enum server_task_inf_type {
    SERVER_TASK_INF_TYPE_COMPLETION,
    SERVER_TASK_INF_TYPE_EMBEDDING,
    SERVER_TASK_INF_TYPE_RERANK,
    SERVER_TASK_INF_TYPE_INFILL,
};
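
// A server_task is the unit of work that travels from the HTTP handlers to
// the inference loop: it is posted to server_queue, picked up and assigned to
// a slot, and its result is delivered back through server_response under the
// same task id.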
struct server_task {
    int id        = -1; // to be filled by server_queue
    int id_target = -1; // used by SERVER_TASK_TYPE_CANCEL

    llama_tokens prompt_tokens;
    server_task_type type;
    json data;

    server_task_inf_type inf_type = SERVER_TASK_INF_TYPE_COMPLETION;

    // utility function
    static std::unordered_set<int> get_list_id(const std::vector<server_task> & tasks) {
        std::unordered_set<int> ids(tasks.size());
        for (size_t i = 0; i < tasks.size(); i++) {
            ids.insert(tasks[i].id);
        }
        return ids;
    }
};

struct server_task_result {
    int id = -1;

    json data;

    bool stop;
    bool error;
};

struct slot_params {
    bool stream       = true;
    bool cache_prompt = false; // remember the prompt to avoid reprocessing the entire prompt

    int32_t n_keep    =  0; // number of tokens to keep from the initial prompt
    int32_t n_discard =  0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
    int32_t n_predict = -1; // new tokens to predict
    int32_t n_indent  =  0; // minimum line indentation for the generated text in number of whitespace characters

    int64_t t_max_prompt_ms  = -1; // TODO: implement
    int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit

    std::vector<std::string> antiprompt;
};
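
// A server_slot holds the per-client decoding state: its share of the context
// (n_ctx / n_parallel tokens, see server_context::init()), the cached prompt,
// the sampler, and bookkeeping for stop conditions and timings. Slots are
// reused across requests.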
struct server_slot {
    int id;
    int id_task = -1;

    // the index relative to the completion multi-task request
    size_t index = 0;

    struct slot_params params;

    slot_state state = SLOT_STATE_IDLE;

    // used to determine the slot that has been used the longest
    int64_t t_last_used = -1;

    // generation props
    int32_t n_ctx       = 0; // context size per slot
    int32_t n_past      = 0;
    int32_t n_decoded   = 0;
    int32_t n_remaining = -1;
    int32_t i_batch     = -1;
    int32_t n_predict   = -1; // TODO: disambiguate from params.n_predict

    // n_prompt_tokens may not be equal to prompt_tokens.size(), because the prompt may be truncated
    int32_t n_prompt_tokens           = 0;
    int32_t n_prompt_tokens_processed = 0;

    // input prompt tokens
    llama_tokens prompt_tokens;

    size_t last_nl_pos = 0;

    std::string generated_text;
    llama_tokens cache_tokens;
    std::vector<completion_token_output> generated_token_probs;

    server_task_inf_type inf_type = SERVER_TASK_INF_TYPE_COMPLETION;

    bool has_next_token = true;
    bool has_new_line   = false;
    bool truncated      = false;
    bool stopped_eos    = false;
    bool stopped_word   = false;
    bool stopped_limit  = false;

    bool oaicompat = false;

    std::string oaicompat_model;
    std::string stopping_word;

    // sampling
    json json_schema;

    struct common_sampler_params sparams;
    struct common_sampler * smpl = nullptr;

    llama_token sampled;

    // stats
    size_t n_sent_text        = 0; // number of sent text characters
    size_t n_sent_token_probs = 0;

    int64_t t_start_process_prompt;
    int64_t t_start_generation;

    double t_prompt_processing; // ms
    double t_token_generation;  // ms

    std::function<void(int)> callback_on_release;

    void reset() {
        SLT_DBG(*this, "%s", "\n");

        n_prompt_tokens    = 0;
        last_nl_pos        = 0;
        generated_text     = "";
        has_new_line       = false;
        truncated          = false;
        stopped_eos        = false;
        stopped_word       = false;
        stopped_limit      = false;
        stopping_word      = "";
        n_past             = 0;
        n_sent_text        = 0;
        n_sent_token_probs = 0;
        inf_type           = SERVER_TASK_INF_TYPE_COMPLETION;

        generated_token_probs.clear();
    }

    bool has_budget(common_params & global_params) {
        if (params.n_predict == -1 && global_params.n_predict == -1) {
            return true; // limitless
        }

        n_remaining = -1;

        if (params.n_predict != -1) {
            n_remaining = params.n_predict - n_decoded;
        } else if (global_params.n_predict != -1) {
            n_remaining = global_params.n_predict - n_decoded;
        }

        return n_remaining > 0; // true while there is budget left
    }

    bool is_processing() const {
        return state != SLOT_STATE_IDLE;
    }

    void add_token(const completion_token_output & token) {
        if (!is_processing()) {
            SLT_WRN(*this, "%s", "slot is not processing\n");
            return;
        }
        generated_token_probs.push_back(token);
    }

    void release() {
        if (is_processing()) {
            SLT_INF(*this, "stop processing: n_past = %d, truncated = %d\n", n_past, truncated);

            t_last_used = ggml_time_us();
            t_token_generation = (ggml_time_us() - t_start_generation) / 1e3;
            state = SLOT_STATE_IDLE;
            callback_on_release(id);
        }
    }

    json get_formated_timings() const {
        return json {
            {"prompt_n",               n_prompt_tokens_processed},
            {"prompt_ms",              t_prompt_processing},
            {"prompt_per_token_ms",    t_prompt_processing / n_prompt_tokens_processed},
            {"prompt_per_second",      1e3 / t_prompt_processing * n_prompt_tokens_processed},

            {"predicted_n",            n_decoded},
            {"predicted_ms",           t_token_generation},
            {"predicted_per_token_ms", t_token_generation / n_decoded},
            {"predicted_per_second",   1e3 / t_token_generation * n_decoded},
        };
    }
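
    // Scans the generated text for the configured stop words. STOP_TYPE_FULL
    // only searches the tail that could contain a complete stop word (the
    // last token plus the word's length) and latches the stopped state;
    // STOP_TYPE_PARTIAL additionally matches (via find_partial_stop_string)
    // when the text ends in a prefix of a stop word, so streaming can hold
    // back characters that might still turn into a stop word. Returns the
    // position of the earliest match, or std::string::npos.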
    size_t find_stopping_strings(const std::string & text, const size_t last_token_size, const stop_type type) {
        size_t stop_pos = std::string::npos;

        for (const std::string & word : params.antiprompt) {
            size_t pos;

            if (type == STOP_TYPE_FULL) {
                const size_t tmp      = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;

                pos = text.find(word, from_pos);
            } else {
                pos = find_partial_stop_string(word, text);
            }

            if (pos != std::string::npos && (stop_pos == std::string::npos || pos < stop_pos)) {
                if (type == STOP_TYPE_FULL) {
                    stopped_word   = true;
                    stopping_word  = word;
                    has_next_token = false;
                }
                stop_pos = pos;
            }
        }

        return stop_pos;
    }

    void print_timings() const {
        const double t_prompt        = t_prompt_processing / n_prompt_tokens_processed;
        const double n_prompt_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed;

        const double t_gen        = t_token_generation / n_decoded;
        const double n_gen_second = 1e3 / t_token_generation * n_decoded;

        SLT_INF(*this,
                "\n"
                "\rprompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n"
                "\r       eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n"
                "\r      total time = %10.2f ms / %5d tokens\n",
                t_prompt_processing, n_prompt_tokens_processed, t_prompt, n_prompt_second,
                t_token_generation, n_decoded, t_gen, n_gen_second,
                t_prompt_processing + t_token_generation, n_prompt_tokens_processed + n_decoded);
    }
};

struct server_metrics {
    int64_t t_start = 0;

    uint64_t n_prompt_tokens_processed_total = 0;
    uint64_t t_prompt_processing_total       = 0;
    uint64_t n_tokens_predicted_total        = 0;
    uint64_t t_tokens_generation_total       = 0;

    uint64_t n_prompt_tokens_processed = 0;
    uint64_t t_prompt_processing       = 0;

    uint64_t n_tokens_predicted  = 0;
    uint64_t t_tokens_generation = 0;

    uint64_t n_decode_total     = 0;
    uint64_t n_busy_slots_total = 0;

    void init() {
        t_start = ggml_time_us();
    }

    void on_prompt_eval(const server_slot & slot) {
        n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed;
        n_prompt_tokens_processed       += slot.n_prompt_tokens_processed;
        t_prompt_processing             += slot.t_prompt_processing;
        t_prompt_processing_total       += slot.t_prompt_processing;
    }

    void on_prediction(const server_slot & slot) {
        n_tokens_predicted_total  += slot.n_decoded;
        n_tokens_predicted        += slot.n_decoded;
        t_tokens_generation       += slot.t_token_generation;
        t_tokens_generation_total += slot.t_token_generation;
    }

    void on_decoded(const std::vector<server_slot> & slots) {
        n_decode_total++;
        for (const auto & slot : slots) {
            if (slot.is_processing()) {
                n_busy_slots_total++;
            }
        }
    }

    void reset_bucket() {
        n_prompt_tokens_processed = 0;
        t_prompt_processing       = 0;
        n_tokens_predicted        = 0;
        t_tokens_generation       = 0;
    }
};
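
// server_queue is a mutex-protected, condition-variable driven task queue:
// HTTP threads post() tasks, the single inference thread drains them in
// start_loop() and then calls callback_update_slots() to advance decoding.
// Tasks that cannot run yet (no free slot) are parked in queue_tasks_deferred
// until a slot is released.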
struct server_queue {
    int id = 0;
    bool running = false;

    // queues
    std::deque<server_task> queue_tasks;
    std::deque<server_task> queue_tasks_deferred;

    std::mutex mutex_tasks;
    std::condition_variable condition_tasks;

    // callback functions
    std::function<void(server_task &)> callback_new_task;
    std::function<void(void)>          callback_update_slots;

    // Add a new task to the end of the queue (or the front, if requested)
    int post(server_task task, bool front = false) {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        if (task.id == -1) {
            task.id = id++;
        }
        QUE_DBG("new task, id = %d, front = %d\n", task.id, front);
        const int task_id = task.id;
        if (front) {
            queue_tasks.push_front(std::move(task));
        } else {
            queue_tasks.push_back(std::move(task));
        }
        condition_tasks.notify_one();
        return task_id;
    }

    // multi-task version of post()
    int post(std::vector<server_task> & tasks, bool front = false) {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        for (auto & task : tasks) {
            if (task.id == -1) {
                task.id = id++;
            }
            QUE_DBG("new task, id = %d/%d, front = %d\n", task.id, (int) tasks.size(), front);
            if (front) {
                queue_tasks.push_front(std::move(task));
            } else {
                queue_tasks.push_back(std::move(task));
            }
        }
        condition_tasks.notify_one();
        return 0;
    }

    // Add a new task, but defer until one slot is available
    void defer(server_task task) {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        QUE_DBG("defer task, id = %d\n", task.id);
        queue_tasks_deferred.push_back(std::move(task));
        condition_tasks.notify_one();
    }

    // Get the next id for creating a new task
    int get_new_id() {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        int new_id = id++;
        return new_id;
    }

    // Register function to process a new task
    void on_new_task(std::function<void(server_task &)> callback) {
        callback_new_task = std::move(callback);
    }

    // Register the function to be called when all slots data is ready to be processed
    void on_update_slots(std::function<void(void)> callback) {
        callback_update_slots = std::move(callback);
    }

    // Called when the state of one slot changes; moves one task from the deferred queue to the main queue
    void pop_deferred_task() {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        if (!queue_tasks_deferred.empty()) {
            queue_tasks.emplace_back(std::move(queue_tasks_deferred.front()));
            queue_tasks_deferred.pop_front();
        }
        condition_tasks.notify_one();
    }

    // end the start_loop routine
    void terminate() {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        running = false;
        condition_tasks.notify_all();
    }

    /**
     * Main loop consists of these steps:
     * - Wait until a new task arrives
     * - Process the task (i.e. maybe copy data into slot)
     * - Check if multitask is finished
     * - Update all slots
     */
    void start_loop() {
        running = true;

        while (true) {
            QUE_DBG("%s", "processing new tasks\n");

            while (true) {
                std::unique_lock<std::mutex> lock(mutex_tasks);
                if (queue_tasks.empty()) {
                    lock.unlock();
                    break;
                }
                server_task task = queue_tasks.front();
                queue_tasks.pop_front();
                lock.unlock();

                QUE_DBG("processing task, id = %d\n", task.id);
                callback_new_task(task);
            }

            // all tasks in the current loop are processed, slots data is now ready
            QUE_DBG("%s", "update slots\n");

            callback_update_slots();

            QUE_DBG("%s", "waiting for new tasks\n");
            {
                std::unique_lock<std::mutex> lock(mutex_tasks);
                if (queue_tasks.empty()) {
                    if (!running) {
                        QUE_DBG("%s", "terminate\n");
                        return;
                    }
                    condition_tasks.wait(lock, [&]{
                        return (!queue_tasks.empty() || !running);
                    });
                }
            }
        }
    }
};
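
// server_response hands results back to the HTTP threads: each request
// registers its task id(s) in waiting_task_ids, then blocks in recv() until
// send() delivers a server_task_result with a matching id.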
struct server_response {
    // for keeping track of all tasks waiting for the result
    std::unordered_set<int> waiting_task_ids;

    // the main result queue
    std::vector<server_task_result> queue_results;

    std::mutex mutex_results;
    std::condition_variable condition_results;

    // add the id_task to the list of tasks waiting for response
    void add_waiting_task_id(int id_task) {
        SRV_DBG("add task %d to waiting list. current waiting = %d (before add)\n", id_task, (int) waiting_task_ids.size());

        std::unique_lock<std::mutex> lock(mutex_results);
        waiting_task_ids.insert(id_task);
    }

    void add_waiting_tasks(const std::vector<server_task> & tasks) {
        std::unique_lock<std::mutex> lock(mutex_results);

        for (const auto & task : tasks) {
            SRV_DBG("add task %d to waiting list. current waiting = %d (before add)\n", task.id, (int) waiting_task_ids.size());
            waiting_task_ids.insert(task.id);
        }
    }

    // when the request is finished, we can remove the task associated with it
    void remove_waiting_task_id(int id_task) {
        SRV_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size());

        std::unique_lock<std::mutex> lock(mutex_results);
        waiting_task_ids.erase(id_task);
    }

    void remove_waiting_task_ids(const std::unordered_set<int> & id_tasks) {
        std::unique_lock<std::mutex> lock(mutex_results);

        for (const auto & id_task : id_tasks) {
            SRV_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size());
            waiting_task_ids.erase(id_task);
        }
    }

    // This function blocks the thread until there is a response for one of the id_tasks
    server_task_result recv(const std::unordered_set<int> & id_tasks) {
        while (true) {
            std::unique_lock<std::mutex> lock(mutex_results);
            condition_results.wait(lock, [&]{
                return !queue_results.empty();
            });

            for (int i = 0; i < (int) queue_results.size(); i++) {
                if (id_tasks.find(queue_results[i].id) != id_tasks.end()) {
                    server_task_result res = queue_results[i];
                    queue_results.erase(queue_results.begin() + i);
                    return res;
                }
            }
        }

        // should never reach here
    }

    // single-task version of recv()
    server_task_result recv(int id_task) {
        std::unordered_set<int> id_tasks = {id_task};
        return recv(id_tasks);
    }

    // Send a new result to a waiting id_task
    void send(server_task_result & result) {
        SRV_DBG("sending result for task id = %d\n", result.id);

        std::unique_lock<std::mutex> lock(mutex_results);
        for (const auto & id_task : waiting_task_ids) {
            if (result.id == id_task) {
                SRV_DBG("task id = %d moved to result queue\n", result.id);

                queue_results.push_back(std::move(result));
                condition_results.notify_all();
                return;
            }
        }
    }
};
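
// server_context owns both queues and ties them together. The typical flow
// for a single request, sketched from the APIs above (not verbatim from this
// file; ctx_server stands for an arbitrary server_context instance):
//
//     server_task task;
//     task.id = ctx_server.queue_tasks.get_new_id();
//     ctx_server.queue_results.add_waiting_task_id(task.id); // register first, so the result cannot be missed
//     ctx_server.queue_tasks.post(task);                     // wakes up start_loop()
//
//     server_task_result result = ctx_server.queue_results.recv(task.id); // blocks until the result arrives
//     ctx_server.queue_results.remove_waiting_task_id(task.id);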
struct server_context {
    llama_model * model = nullptr;
    llama_context * ctx = nullptr;
    std::vector<common_lora_adapter_container> loras;

    common_params params;

    llama_batch batch = {};

    bool clean_kv_cache = true;
    bool add_bos_token  = true;
    bool has_eos_token  = false;

    int32_t n_ctx; // total context for all clients / slots

    // slots / clients
    std::vector<server_slot> slots;
    json default_generation_settings_for_props;

    server_queue    queue_tasks;
    server_response queue_results;

    server_metrics metrics;

    // Necessary similarity of prompt for slot selection
    float slot_prompt_similarity = 0.0f;

    ~server_context() {
        if (ctx) {
            llama_free(ctx);
            ctx = nullptr;
        }

        if (model) {
            llama_free_model(model);
            model = nullptr;
        }

        // Clear any sampling context
        for (server_slot & slot : slots) {
            if (slot.smpl != nullptr) {
                common_sampler_free(slot.smpl);
            }
        }

        llama_batch_free(batch);
    }

    bool load_model(const common_params & params_) {
        params = params_;

        // reserve one extra sequence (seq_id == 0) for extra features
        params.n_parallel += 1;

        common_init_result llama_init = common_init_from_params(params);

        model = llama_init.model;
        ctx   = llama_init.context;
        loras = llama_init.lora_adapters;

        params.n_parallel -= 1; // restore the user's setting for the rest of the code

        if (model == nullptr) {
            SRV_ERR("failed to load model, '%s'\n", params.model.c_str());
            return false;
        }

        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_add_bos_token(model);
        has_eos_token = !llama_add_eos_token(model);

        return true;
    }

    bool validate_model_chat_template() const {
        llama_chat_message chat[] = {{"user", "test"}};

        const int res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0);

        return res > 0;
    }

    void init() {
        const int32_t n_ctx_slot = n_ctx / params.n_parallel;

        SRV_INF("initializing slots, n_slots = %d\n", params.n_parallel);

        for (int i = 0; i < params.n_parallel; i++) {
            server_slot slot;

            slot.id        = i;
            slot.n_ctx     = n_ctx_slot;
            slot.n_predict = params.n_predict;

            SLT_INF(slot, "new slot n_ctx_slot = %d\n", slot.n_ctx);

            slot.sparams = params.sparams;

            slot.callback_on_release = [this](int) {
                queue_tasks.pop_deferred_task();
            };

            slot.reset();

            slots.push_back(slot);
        }

        default_generation_settings_for_props = get_formated_generation(slots.front());
        default_generation_settings_for_props["seed"] = -1;

        // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
        // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
        {
            const int32_t n_batch = llama_n_batch(ctx);

            // only a single seq_id per token is needed
            batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1);
        }

        metrics.init();
    }

    server_slot * get_slot_by_id(int id) {
        for (server_slot & slot : slots) {
            if (slot.id == id) {
                return &slot;
            }
        }

        return nullptr;
    }
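
    // Slot selection: first try to find an idle slot whose cached prompt
    // shares the longest common subsequence with the incoming prompt (and
    // whose similarity fraction clears slot_prompt_similarity), so the prompt
    // cache can be reused; otherwise fall back to the least recently used
    // idle slot.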
    server_slot * get_available_slot(const server_task & task) {
        server_slot * ret = nullptr;

        // find the slot that has at least n% prompt similarity
        if (ret == nullptr && slot_prompt_similarity != 0.0f) {
            int lcs_len = 0;
            float similarity = 0;

            for (server_slot & slot : slots) {
                // skip the slot if it is not available
                if (slot.is_processing()) {
                    continue;
                }

                // skip the slot if it does not contain cached tokens
                if (slot.cache_tokens.empty()) {
                    continue;
                }

                // length of the Longest Common Subsequence between the current slot's prompt and the input prompt
                int cur_lcs_len = longest_common_subsequence(slot.cache_tokens, task.prompt_tokens);

                // fraction of the common subsequence length compared to the current slot's prompt length
                float cur_similarity = static_cast<float>(cur_lcs_len) / static_cast<int>(slot.cache_tokens.size());

                // select the current slot if the criteria match
                if (cur_lcs_len > lcs_len && cur_similarity > slot_prompt_similarity) {
                    lcs_len    = cur_lcs_len;
                    similarity = cur_similarity;
                    ret = &slot;
                }
            }

            if (ret != nullptr) {
                SLT_DBG(*ret, "selected slot by lcs similarity, lcs_len = %d, similarity = %f\n", lcs_len, similarity);
            }
        }

        // find the slot that has been least recently used
        if (ret == nullptr) {
            int64_t t_last = ggml_time_us();
            for (server_slot & slot : slots) {
                // skip the slot if it is not available
                if (slot.is_processing()) {
                    continue;
                }

                // select the current slot if the criteria match
                if (slot.t_last_used < t_last) {
                    t_last = slot.t_last_used;
                    ret = &slot;
                }
            }

            if (ret != nullptr) {
                SLT_DBG(*ret, "selected slot by lru, t_last = %" PRId64 "\n", t_last);
            }
        }

        return ret;
    }
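
    // Configures a slot from the JSON body of a request: per-request
    // parameters override the server defaults, the "grammar" / "json_schema"
    // constraints are compiled, logit biases and stop words are collected,
    // and a fresh sampler is created. Returns false (and sends an error
    // result) if the request is invalid.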
    bool launch_slot_with_task(server_slot & slot, const server_task & task) {
        slot_params default_params;
        // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them)
        auto default_sparams = params.sparams;
        const auto & data = task.data;

        if (data.count("__oaicompat") != 0) {
            slot.oaicompat = true;
            slot.oaicompat_model = json_value(data, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
        } else {
            slot.oaicompat = false;
            slot.oaicompat_model = "";
        }

        slot.params.stream              = json_value(data, "stream",             false);
        slot.params.cache_prompt        = json_value(data, "cache_prompt",       false);
        slot.params.n_predict           = json_value(data, "n_predict",          json_value(data, "max_tokens", default_params.n_predict));
        slot.params.n_indent            = json_value(data, "n_indent",           default_params.n_indent);
        slot.sparams.top_k              = json_value(data, "top_k",              default_sparams.top_k);
        slot.sparams.top_p              = json_value(data, "top_p",              default_sparams.top_p);
        slot.sparams.min_p              = json_value(data, "min_p",              default_sparams.min_p);
        slot.sparams.xtc_probability    = json_value(data, "xtc_probability",    default_sparams.xtc_probability);
        slot.sparams.xtc_threshold      = json_value(data, "xtc_threshold",      default_sparams.xtc_threshold);
        slot.sparams.typ_p              = json_value(data, "typical_p",          default_sparams.typ_p);
        slot.sparams.temp               = json_value(data, "temperature",        default_sparams.temp);
        slot.sparams.dynatemp_range     = json_value(data, "dynatemp_range",     default_sparams.dynatemp_range);
        slot.sparams.dynatemp_exponent  = json_value(data, "dynatemp_exponent",  default_sparams.dynatemp_exponent);
        slot.sparams.penalty_last_n     = json_value(data, "repeat_last_n",      default_sparams.penalty_last_n);
        slot.sparams.penalty_repeat     = json_value(data, "repeat_penalty",     default_sparams.penalty_repeat);
        slot.sparams.penalty_freq       = json_value(data, "frequency_penalty",  default_sparams.penalty_freq);
        slot.sparams.penalty_present    = json_value(data, "presence_penalty",   default_sparams.penalty_present);
        slot.sparams.dry_multiplier     = json_value(data, "dry_multiplier",     default_sparams.dry_multiplier);
        slot.sparams.dry_base           = json_value(data, "dry_base",           default_sparams.dry_base);
        slot.sparams.dry_allowed_length = json_value(data, "dry_allowed_length", default_sparams.dry_allowed_length);
        slot.sparams.dry_penalty_last_n = json_value(data, "dry_penalty_last_n", default_sparams.dry_penalty_last_n);
        slot.sparams.mirostat           = json_value(data, "mirostat",           default_sparams.mirostat);
        slot.sparams.mirostat_tau       = json_value(data, "mirostat_tau",       default_sparams.mirostat_tau);
        slot.sparams.mirostat_eta       = json_value(data, "mirostat_eta",       default_sparams.mirostat_eta);
        slot.sparams.penalize_nl        = json_value(data, "penalize_nl",        default_sparams.penalize_nl);
        slot.params.n_keep              = json_value(data, "n_keep",             default_params.n_keep);
        slot.params.n_discard           = json_value(data, "n_discard",          default_params.n_discard);
        slot.sparams.seed               = json_value(data, "seed",               default_sparams.seed);
        slot.sparams.n_probs            = json_value(data, "n_probs",            default_sparams.n_probs);
        slot.sparams.min_keep           = json_value(data, "min_keep",           default_sparams.min_keep);
      //slot.params.t_max_prompt_ms     = json_value(data, "t_max_prompt_ms",    default_params.t_max_prompt_ms); // TODO: implement
        slot.params.t_max_predict_ms    = json_value(data, "t_max_predict_ms",   default_params.t_max_predict_ms);

        if (slot.sparams.dry_base < 1.0f) {
            slot.sparams.dry_base = default_sparams.dry_base;
        }

        // sequence breakers for DRY
        {
            // Currently, this is not compatible with TextGen WebUI, Koboldcpp and SillyTavern format
            // Ref: https://github.com/oobabooga/text-generation-webui/blob/d1af7a41ade7bd3c3a463bfa640725edb818ebaf/extensions/openai/typing.py#L39
            if (data.contains("dry_sequence_breakers")) {
                slot.sparams.dry_sequence_breakers = json_value(data, "dry_sequence_breakers", std::vector<std::string>());
                if (slot.sparams.dry_sequence_breakers.empty()) {
                    send_error(task, "Error: dry_sequence_breakers must be a non-empty array of strings", ERROR_TYPE_INVALID_REQUEST);
                    return false;
                }
            }
        }

        // process "json_schema" and "grammar"
        if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
            send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST);
            return false;
        }
        if (data.contains("json_schema") && !data.contains("grammar")) {
            try {
                auto schema = json_value(data, "json_schema", json::object());
                slot.sparams.grammar = json_schema_to_grammar(schema);
            } catch (const std::exception & e) {
                send_error(task, std::string("\"json_schema\": ") + e.what(), ERROR_TYPE_INVALID_REQUEST);
                return false;
            }
        } else {
            slot.sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
        }

        if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) {
            // Might be better to reject the request with a 400 ?
            SLT_WRN(slot, "n_predict = %d exceeds server configuration, setting to %d\n", slot.params.n_predict, slot.n_predict);
            slot.params.n_predict = slot.n_predict;
        }

        {
            slot.sparams.logit_bias.clear();

            if (json_value(data, "ignore_eos", false) && has_eos_token) {
                slot.sparams.logit_bias.push_back({llama_token_eos(model), -INFINITY});
            }

            const auto & logit_bias = data.find("logit_bias");
            if (logit_bias != data.end() && logit_bias->is_array()) {
                const int n_vocab = llama_n_vocab(model);
                for (const auto & el : *logit_bias) {
                    // TODO: we may want to throw errors here, in case "el" is incorrect
                    if (el.is_array() && el.size() == 2) {
                        float bias;
                        if (el[1].is_number()) {
                            bias = el[1].get<float>();
                        } else if (el[1].is_boolean() && !el[1].get<bool>()) {
                            bias = -INFINITY;
                        } else {
                            continue;
                        }

                        if (el[0].is_number_integer()) {
                            llama_token tok = el[0].get<llama_token>();
                            if (tok >= 0 && tok < n_vocab) {
                                slot.sparams.logit_bias.push_back({tok, bias});
                            }
                        } else if (el[0].is_string()) {
                            auto toks = common_tokenize(model, el[0].get<std::string>(), false);
                            for (auto tok : toks) {
                                slot.sparams.logit_bias.push_back({tok, bias});
                            }
                        }
                    }
                }
            }
        }

        {
            slot.params.antiprompt.clear();

            const auto & stop = data.find("stop");
            if (stop != data.end() && stop->is_array()) {
                for (const auto & word : *stop) {
                    if (!word.empty()) {
                        slot.params.antiprompt.push_back(word);
                    }
                }
            }
        }

        {
            const auto & samplers = data.find("samplers");
            if (samplers != data.end() && samplers->is_array()) {
                std::vector<std::string> sampler_names;
                for (const auto & name : *samplers) {
                    if (name.is_string()) {
                        sampler_names.emplace_back(name);
                    }
                }
                slot.sparams.samplers = common_sampler_types_from_names(sampler_names, false);
            } else {
                slot.sparams.samplers = default_sparams.samplers;
            }
        }

        {
            if (slot.smpl != nullptr) {
                common_sampler_free(slot.smpl);
            }

            slot.smpl = common_sampler_init(model, slot.sparams);
            if (slot.smpl == nullptr) {
                // for now, the only error that may happen here is invalid grammar
                send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST);
                return false;
            }
        }

        slot.state = SLOT_STATE_STARTED;

        SLT_INF(slot, "%s", "processing task\n");

        return true;
    }

    void kv_cache_clear() {
        SRV_DBG("%s", "clearing KV cache\n");

        // clear the entire KV cache
        llama_kv_cache_clear(ctx);
        clean_kv_cache = false;
    }
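
    // Called once per sampled token: appends it to the generated text,
    // buffers incomplete UTF-8 sequences so partial multi-byte characters
    // are never streamed, trims stop words, and enforces the various stop
    // conditions (token budget, time limit, indentation, context size, EOS).
    // Returns true while generation should continue.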
    bool process_token(completion_token_output & result, server_slot & slot) {
        // remember which tokens were sampled - used for repetition penalties during sampling
        const std::string token_str = common_token_to_piece(ctx, result.tok, params.special);
        slot.sampled = result.tok;

        // search stop word and delete it
        slot.generated_text += token_str;
        slot.has_next_token = true;

        // check if there is incomplete UTF-8 character at the end
        bool incomplete = false;
        for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i) {
            unsigned char c = slot.generated_text[slot.generated_text.size() - i];
            if ((c & 0xC0) == 0x80) {
                // continuation byte: 10xxxxxx
                continue;
            }
            if ((c & 0xE0) == 0xC0) {
                // 2-byte character: 110xxxxx ...
                incomplete = i < 2;
            } else if ((c & 0xF0) == 0xE0) {
                // 3-byte character: 1110xxxx ...
                incomplete = i < 3;
            } else if ((c & 0xF8) == 0xF0) {
                // 4-byte character: 11110xxx ...
                incomplete = i < 4;
            }
            // else 1-byte character or invalid byte
            break;
        }

        if (!incomplete) {
            size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());

            const std::string str_test = slot.generated_text.substr(pos);
            bool send_text = true;

            size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_FULL);
            if (stop_pos != std::string::npos) {
                slot.generated_text.erase(
                    slot.generated_text.begin() + pos + stop_pos,
                    slot.generated_text.end());
                pos = std::min(slot.n_sent_text, slot.generated_text.size());
            } else if (slot.has_next_token) {
                stop_pos  = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_PARTIAL);
                send_text = stop_pos == std::string::npos;
            }

            // check if there is any token to predict
            if (send_text) {
                // do not send the stop word in the response
                result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
                slot.n_sent_text += result.text_to_send.size();
                // add the token to slot queue and cache
            }

            slot.add_token(result);
            if (slot.params.stream) {
                send_partial_response(slot, result);
            }
        }

        if (incomplete) {
            slot.has_next_token = true;
        }

        // check the limits
        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params)) {
            slot.stopped_limit  = true;
            slot.has_next_token = false;

            SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.params.n_predict);
        }

        if (slot.has_new_line) {
            // if we have already seen a new line, we stop after a certain time limit
            if (slot.params.t_max_predict_ms > 0 && (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.params.t_max_predict_ms)) {
                slot.stopped_limit  = true;
                slot.has_next_token = false;

                SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.params.t_max_predict_ms);
            }

            // require that each new line has a whitespace prefix (i.e. indentation) of at least slot.params.n_indent
            if (slot.params.n_indent > 0) {
                // check the current indentation
                // TODO: improve by not doing it more than once for each new line
                if (slot.last_nl_pos > 0) {
                    size_t pos = slot.last_nl_pos;

                    int n_indent = 0;
                    while (pos < slot.generated_text.size() && (slot.generated_text[pos] == ' ' || slot.generated_text[pos] == '\t')) {
                        n_indent++;
                        pos++;
                    }

                    if (pos < slot.generated_text.size() && n_indent < slot.params.n_indent) {
                        slot.stopped_limit  = true;
                        slot.has_next_token = false;

                        // cut the last line
                        slot.generated_text.erase(pos, std::string::npos);

                        SLT_DBG(slot, "stopped by indentation limit, n_decoded = %d, n_indent = %d\n", slot.n_decoded, n_indent);
                    }
                }

                // find the next new line
                {
                    const size_t pos = slot.generated_text.find('\n', slot.last_nl_pos);
                    if (pos != std::string::npos) {
                        slot.last_nl_pos = pos + 1;
                    }
                }
            }
        }

        // check if there is a new line in the generated text
        if (result.text_to_send.find('\n') != std::string::npos) {
            slot.has_new_line = true;
        }

        // if context shift is disabled, we stop when it reaches the context limit
        if (slot.n_past >= slot.n_ctx) {
            slot.truncated      = true;
            slot.stopped_limit  = true;
            slot.has_next_token = false;

            SLT_DBG(slot, "stopped due to running out of context capacity, n_past = %d, n_prompt_tokens = %d, n_decoded = %d, n_ctx = %d\n",
                    slot.n_past, slot.n_prompt_tokens, slot.n_decoded, slot.n_ctx);
        }

        if (llama_token_is_eog(model, result.tok)) {
            slot.stopped_eos    = true;
            slot.has_next_token = false;

            SLT_DBG(slot, "%s", "stopped by EOS\n");
        }

        const auto n_ctx_train = llama_n_ctx_train(model);

        if (slot.params.n_predict < 1 && slot.n_predict < 1 && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
            slot.truncated      = true;
            slot.stopped_limit  = true;
            slot.has_next_token = false; // stop prediction

            SLT_WRN(slot,
                    "n_predict (%d) is set for infinite generation. "
                    "Limiting generated tokens to n_ctx_train (%d) to avoid EOS-less generation infinite loop\n",
                    slot.params.n_predict, n_ctx_train);
        }

        SLT_DBG(slot, "n_decoded = %d, n_remaining = %d, next token: %5d '%s'\n", slot.n_decoded, slot.n_remaining, result.tok, token_str.c_str());

        return slot.has_next_token; // continue
    }

    json get_formated_generation(const server_slot & slot) const {
        std::vector<std::string> samplers;
        samplers.reserve(slot.sparams.samplers.size());
        for (const auto & sampler : slot.sparams.samplers) {
            samplers.emplace_back(common_sampler_type_to_str(sampler));
        }

        return json {
            {"n_ctx",                 slot.n_ctx},
            {"n_predict",             slot.n_predict},     // Server configured n_predict
            {"model",                 params.model_alias},
            {"seed",                  slot.sparams.seed},
            {"seed_cur",              slot.smpl ? common_sampler_get_seed(slot.smpl) : 0},
            {"temperature",           slot.sparams.temp},
            {"dynatemp_range",        slot.sparams.dynatemp_range},
            {"dynatemp_exponent",     slot.sparams.dynatemp_exponent},
            {"top_k",                 slot.sparams.top_k},
            {"top_p",                 slot.sparams.top_p},
            {"min_p",                 slot.sparams.min_p},
            {"xtc_probability",       slot.sparams.xtc_probability},
            {"xtc_threshold",         slot.sparams.xtc_threshold},
            {"typical_p",             slot.sparams.typ_p},
            {"repeat_last_n",         slot.sparams.penalty_last_n},
            {"repeat_penalty",        slot.sparams.penalty_repeat},
            {"presence_penalty",      slot.sparams.penalty_present},
            {"frequency_penalty",     slot.sparams.penalty_freq},
            {"dry_multiplier",        slot.sparams.dry_multiplier},
            {"dry_base",              slot.sparams.dry_base},
            {"dry_allowed_length",    slot.sparams.dry_allowed_length},
            {"dry_penalty_last_n",    slot.sparams.dry_penalty_last_n},
            {"dry_sequence_breakers", slot.sparams.dry_sequence_breakers},
            {"mirostat",              slot.sparams.mirostat},
            {"mirostat_tau",          slot.sparams.mirostat_tau},
            {"mirostat_eta",          slot.sparams.mirostat_eta},
            {"penalize_nl",           slot.sparams.penalize_nl},
            {"stop",                  slot.params.antiprompt},
            {"max_tokens",            slot.params.n_predict}, // User configured n_predict
            {"n_keep",                slot.params.n_keep},
            {"n_discard",             slot.params.n_discard},
            {"ignore_eos",            slot.sparams.ignore_eos},
            {"stream",                slot.params.stream},
          //{"logit_bias",            slot.sparams.logit_bias},
            {"n_probs",               slot.sparams.n_probs},
            {"min_keep",              slot.sparams.min_keep},
            {"grammar",               slot.sparams.grammar},
            {"samplers",              samplers},
        };
    }

    void send_error(const server_task & task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
        send_error(task.id, error, type);
    }

    void send_error(const server_slot & slot, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
        send_error(slot.id_task, error, type);
    }

    void send_error(const int id_task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
        SRV_ERR("task id = %d, error: %s\n", id_task, error.c_str());

        server_task_result res;
        res.id    = id_task;
        res.stop  = false;
        res.error = true;
        res.data  = format_error_response(error, type);

        queue_results.send(res);
    }

    void send_partial_response(server_slot & slot, completion_token_output tkn) {
        server_task_result res;
        res.id    = slot.id_task;
        res.error = false;
        res.stop  = false;
        res.data  = json {
            {"content",    tkn.text_to_send},
            {"stop",       false},
            {"id_slot",    slot.id},
            {"multimodal", false},
            {"index",      slot.index},
        };

        if (slot.sparams.n_probs > 0) {
            const llama_tokens to_send_toks = common_tokenize(ctx, tkn.text_to_send, false);
            const size_t probs_pos      = std::min(slot.n_sent_token_probs,                       slot.generated_token_probs.size());
            const size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());

            std::vector<completion_token_output> probs_output;
            if (probs_pos < probs_stop_pos) {
                probs_output = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin() + probs_pos,
                        slot.generated_token_probs.begin() + probs_stop_pos);
            }
            slot.n_sent_token_probs = probs_stop_pos;

            res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
        }

        if (slot.oaicompat) {
            res.data["oaicompat_token_ctr"] = slot.n_decoded;
            res.data["model"] = slot.oaicompat_model;
        }

        queue_results.send(res);
    }

    void send_final_response(const server_slot & slot) {
        server_task_result res;
        res.id    = slot.id_task;
        res.error = false;
        res.stop  = true;
        res.data  = json {
            {"content",             !slot.params.stream ? slot.generated_text : ""},
            {"id_slot",             slot.id},
            {"stop",                true},
            {"model",               params.model_alias},
            {"tokens_predicted",    slot.n_decoded},
            {"tokens_evaluated",    slot.n_prompt_tokens},
            {"generation_settings", get_formated_generation(slot)},
            {"prompt",              common_detokenize(ctx, slot.prompt_tokens)},
            {"has_new_line",        slot.has_new_line},
            {"truncated",           slot.truncated},
            {"stopped_eos",         slot.stopped_eos},
            {"stopped_word",        slot.stopped_word},
            {"stopped_limit",       slot.stopped_limit},
            {"stopping_word",       slot.stopping_word},
            {"tokens_cached",       slot.n_past},
            {"timings",             slot.get_formated_timings()},
            {"index",               slot.index},
        };

        if (slot.sparams.n_probs > 0) {
            std::vector<completion_token_output> probs;
            if (!slot.params.stream && slot.stopped_word) {
                const llama_tokens stop_word_toks = common_tokenize(ctx, slot.stopping_word, false);

                size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
                probs = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin(),
                        slot.generated_token_probs.end() - safe_offset);
            } else {
                probs = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin(),
                        slot.generated_token_probs.end());
            }

            res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs);
        }

        if (slot.oaicompat) {
            res.data["oaicompat_token_ctr"] = slot.n_decoded;
            res.data["model"] = slot.oaicompat_model;
        }

        queue_results.send(res);
    }
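
    // For embedding and rerank tasks the batch carries one sequence per slot
    // (seq_id == slot.id + 1, since seq_id 0 is reserved in load_model()).
    // The pooled sequence embedding from llama_get_embeddings_seq() is
    // preferred, with the per-token embedding as a fallback for models
    // without pooling; the result is normalized via common_embd_normalize().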
    void send_embedding(const server_slot & slot, const llama_batch & batch) {
        server_task_result res;
        res.id    = slot.id_task;
        res.error = false;
        res.stop  = true;

        const int n_embd = llama_n_embd(model);

        std::vector<float> embd_res(n_embd, 0.0f);

        for (int i = 0; i < batch.n_tokens; ++i) {
            if (!batch.logits[i] || batch.seq_id[i][0] != slot.id + 1) {
                continue;
            }

            const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
            if (embd == NULL) {
                embd = llama_get_embeddings_ith(ctx, i);
            }

            if (embd == NULL) {
                SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]);

                res.data = json {
                    {"embedding", std::vector<float>(n_embd, 0.0f)},
                    {"index", slot.index},
                };

                continue;
            }

            common_embd_normalize(embd, embd_res.data(), n_embd);

            res.data = json {
                {"embedding", embd_res},
                {"index", slot.index},
            };
        }

        SLT_DBG(slot, "%s", "sending embeddings\n");

        queue_results.send(res);
    }
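
    // reranking reuses the embedding path: the relevance score is read from
    // the first component of the sequence embedding (this assumes a reranker
    // model whose head outputs a single logit); -1e6 marks rows that failed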
    void send_rerank(const server_slot & slot, const llama_batch & batch) {
        server_task_result res;
        res.id    = slot.id_task;
        res.error = false;
        res.stop  = true;

        for (int i = 0; i < batch.n_tokens; ++i) {
            if (!batch.logits[i] || batch.seq_id[i][0] != slot.id + 1) {
                continue;
            }

            const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
            if (embd == NULL) {
                embd = llama_get_embeddings_ith(ctx, i);
            }

            if (embd == NULL) {
                SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]);

                res.data = json {
                    {"index", slot.index},
                    {"score", -1e6},
                };

                continue;
            }

            res.data = json {
                {"index", slot.index},
                {"score", embd[0]},
            };
        }

        SLT_DBG(slot, "sending rerank result, res = '%s'\n", res.data.dump().c_str());

        queue_results.send(res);
    }

    //
    // Functions to create new task(s) and receive result(s)
    //

    // break the input "prompt" into multiple tasks if needed, then format and tokenize the input prompt(s)
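    // accepted "prompt" shapes (illustrative):
    //   "prompt": "string"            -> one task
    //   "prompt": [12, 34, 56]        -> one task from pre-tokenized input
    //   "prompt": ["p1", "p2", ...]   -> one task per element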
    std::vector<server_task> create_tasks_inference(json data, server_task_inf_type inf_type) {
        std::vector<server_task> tasks;
        auto create_task = [&](json & task_data, llama_tokens & prompt_tokens) {
            SRV_DBG("create task, n_tokens = %d\n", (int) prompt_tokens.size());
            server_task task;
            task.id            = queue_tasks.get_new_id();
            task.inf_type      = inf_type;
            task.type          = SERVER_TASK_TYPE_INFERENCE;
            task.data          = task_data;
            task.prompt_tokens = std::move(prompt_tokens);
            tasks.push_back(std::move(task));
        };

        static constexpr const char * error_msg = "\"prompt\" must be a string, an array of token ids or an array of prompts";
        if (!data.contains("prompt")) {
            throw std::runtime_error(error_msg);
        }

        // because the llama_tokenize API is thread-safe, we can tokenize the prompt from the HTTP thread
        bool add_special = inf_type != SERVER_TASK_INF_TYPE_RERANK && inf_type != SERVER_TASK_INF_TYPE_INFILL;
        std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx, data.at("prompt"), add_special, true);
        switch (inf_type) {
            case SERVER_TASK_INF_TYPE_RERANK:
                {
                    // prompts[0] is the question
                    // the rest are the answers/documents
                    GGML_ASSERT(tokenized_prompts.size() > 1);
                    SRV_DBG("creating rerank tasks, n_prompts = %d\n", (int) tokenized_prompts.size() - 1);
                    for (size_t i = 1; i < tokenized_prompts.size(); i++) {
                        data["index"] = i - 1;
                        auto tokens = format_rerank(model, tokenized_prompts[0], tokenized_prompts[i]);
                        create_task(data, tokens);
                    }
                } break;
            case SERVER_TASK_INF_TYPE_INFILL:
                {
                    SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
                    for (size_t i = 0; i < tokenized_prompts.size(); i++) {
                        data["index"] = i;
                        auto tokens = format_infill(
                            ctx,
                            data.at("input_prefix"),
                            data.at("input_suffix"),
                            data.at("input_extra"),
                            params.n_batch,
                            params.n_predict,
                            slots[0].n_ctx, // TODO: there should be a better way
                            params.spm_infill,
                            tokenized_prompts[i]
                        );
                        create_task(data, tokens);
                    }
                } break;
            default:
                {
                    SRV_DBG("creating multi-prompt tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
                    for (size_t i = 0; i < tokenized_prompts.size(); i++) {
                        data["index"] = i;
                        create_task(data, tokenized_prompts[i]);
                    }
                }
        }

        return tasks;
    }
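
    // post one CANCEL task per id; the slot that owns the task is released in
    // process_single_task(), and the waiting-result entry is removed first so
    // that no stale result can be delivered afterwards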
    void cancel_tasks(const std::unordered_set<int> & id_tasks) {
        std::vector<server_task> cancel_tasks;
        cancel_tasks.reserve(id_tasks.size());
        for (const auto & id_task : id_tasks) {
            SRV_WRN("cancel task, id_task = %d\n", id_task);

            server_task task;
            task.type      = SERVER_TASK_TYPE_CANCEL;
            task.id_target = id_task;

            cancel_tasks.push_back(task);
            queue_results.remove_waiting_task_id(id_task);
        }
        // push to the beginning of the queue, so these tasks have the highest priority
        queue_tasks.post(cancel_tasks, true);
    }
    // receive the results from task(s) created by create_tasks_inference
    void receive_cmpl_results(
            const std::unordered_set<int> & id_tasks,
            const std::function<void(std::vector<server_task_result>&)> & result_handler,
            const std::function<void(json)> & error_handler) {
        // TODO: currently, there is no way to detect the client has cancelled the request
        std::vector<server_task_result> results(id_tasks.size());
        for (size_t i = 0; i < id_tasks.size(); i++) {
            server_task_result result = queue_results.recv(id_tasks);

            if (result.error) {
                error_handler(result.data);
                cancel_tasks(id_tasks);
                return;
            }

            const size_t idx = result.data["index"];
            GGML_ASSERT(idx < results.size() && "index out of range");

            results[idx] = result;
        }
        result_handler(results);
    }
    // receive the results from task(s) created by create_tasks_inference, in stream mode
    void receive_cmpl_results_stream(
            const std::unordered_set<int> & id_tasks,
            const std::function<bool(server_task_result&)> & result_handler,
            const std::function<void(json)> & error_handler) {
        size_t n_finished = 0;
        while (true) {
            server_task_result result = queue_results.recv(id_tasks);
            if (!result_handler(result)) {
                cancel_tasks(id_tasks);
                break;
            }

            if (result.error) {
                error_handler(result.data);
                cancel_tasks(id_tasks);
                break;
            }

            if (result.stop) {
                if (++n_finished == id_tasks.size()) {
                    break;
                }
            }
        }
    }
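
    // typical (illustrative) usage from an HTTP handler:
    //
    //   const auto task_ids = server_task::get_list_id(tasks);
    //   receive_cmpl_results(task_ids,
    //       [&](std::vector<server_task_result> & results) { /* aggregate */ },
    //       [&](json error_data) { /* report */ });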

    //
    // Functions to process the task
    //
    void process_single_task(const server_task & task) {
        switch (task.type) {
            case SERVER_TASK_TYPE_INFERENCE:
                {
                    const int id_slot = json_value(task.data, "id_slot", -1);

                    server_slot * slot = id_slot != -1 ? get_slot_by_id(id_slot) : get_available_slot(task);

                    if (slot == nullptr) {
                        // if no slot is available, we defer this task for processing later
                        SRV_DBG("no slot is available, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(task);
                        break;
                    }
                    if (slot->is_processing()) {
                        // if requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(task);
                        break;
                    }

                    slot->reset();

                    slot->id_task       = task.id;
                    slot->inf_type      = task.inf_type;
                    slot->index         = json_value(task.data, "index", 0);
                    slot->prompt_tokens = std::move(task.prompt_tokens);

                    if (!launch_slot_with_task(*slot, task)) {
                        SRV_ERR("failed to launch slot with task, id_task = %d\n", task.id);
                        break;
                    }
                } break;
            case SERVER_TASK_TYPE_CANCEL:
                {
                    // release slot linked with the task id
                    for (auto & slot : slots) {
                        if (slot.id_task == task.id_target) {
                            slot.release();
                            break;
                        }
                    }
                } break;
            case SERVER_TASK_TYPE_NEXT_RESPONSE:
                {
                    // do nothing
                } break;
            case SERVER_TASK_TYPE_METRICS:
                {
                    json slots_data = json::array();

                    int n_idle_slots       = 0;
                    int n_processing_slots = 0;

                    for (server_slot & slot : slots) {
                        json slot_data        = get_formated_generation(slot);
                        slot_data["id"]       = slot.id;
                        slot_data["id_task"]  = slot.id_task;
                        slot_data["state"]    = slot.state;
                        slot_data["prompt"]   = common_detokenize(ctx, slot.prompt_tokens);
                        slot_data["next_token"] = {
                            {"has_next_token", slot.has_next_token},
                            {"has_new_line", slot.has_new_line},
                            {"n_remain", slot.n_remaining},
                            {"n_decoded", slot.n_decoded},
                            {"stopped_eos", slot.stopped_eos},
                            {"stopped_word", slot.stopped_word},
                            {"stopped_limit", slot.stopped_limit},
                            {"stopping_word", slot.stopping_word},
                        };

                        if (slot_data["state"] == SLOT_STATE_IDLE) {
                            n_idle_slots++;
                        } else {
                            n_processing_slots++;
                        }

                        slots_data.push_back(slot_data);
                    }
                    SRV_DBG("n_idle_slots = %d, n_processing_slots = %d\n", n_idle_slots, n_processing_slots);

                    server_task_result res;
                    res.id    = task.id;
                    res.stop  = true;
                    res.error = false;
                    res.data  = {
                        { "idle",       n_idle_slots },
                        { "processing", n_processing_slots },
                        { "deferred",   queue_tasks.queue_tasks_deferred.size() },
                        { "t_start",    metrics.t_start },

                        { "n_prompt_tokens_processed_total", metrics.n_prompt_tokens_processed_total },
                        { "t_tokens_generation_total",       metrics.t_tokens_generation_total },
                        { "n_tokens_predicted_total",        metrics.n_tokens_predicted_total },
                        { "t_prompt_processing_total",       metrics.t_prompt_processing_total },

                        { "n_prompt_tokens_processed", metrics.n_prompt_tokens_processed },
                        { "t_prompt_processing",       metrics.t_prompt_processing },
                        { "n_tokens_predicted",        metrics.n_tokens_predicted },
                        { "t_tokens_generation",       metrics.t_tokens_generation },

                        { "n_decode_total",     metrics.n_decode_total },
                        { "n_busy_slots_total", metrics.n_busy_slots_total },

                        { "kv_cache_tokens_count", llama_get_kv_cache_token_count(ctx) },
                        { "kv_cache_used_cells",   llama_get_kv_cache_used_cells(ctx) },

                        { "slots", slots_data },
                    };

                    if (json_value(task.data, "reset_bucket", false)) {
                        metrics.reset_bucket();
                    }

                    queue_results.send(res);
                } break;
            case SERVER_TASK_TYPE_SLOT_SAVE:
                {
                    int id_slot = task.data.at("id_slot");
                    server_slot * slot = get_slot_by_id(id_slot);
                    if (slot == nullptr) {
                        send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    if (slot->is_processing()) {
                        // if requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(task);
                        break;
                    }

                    const size_t token_count = slot->cache_tokens.size();
                    const int64_t t_start = ggml_time_us();

                    std::string filename = task.data.at("filename");
                    std::string filepath = task.data.at("filepath");

                    const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), token_count);

                    const int64_t t_end = ggml_time_us();
                    const double t_save_ms = (t_end - t_start) / 1000.0;

                    server_task_result result;
                    result.id    = task.id;
                    result.stop  = true;
                    result.error = false;
                    result.data  = json {
                        { "id_slot",   id_slot },
                        { "filename",  filename },
                        { "n_saved",   token_count }, // tokens saved
                        { "n_written", nwrite },      // bytes written
                        { "timings", {
                            { "save_ms", t_save_ms }
                        } }
                    };
                    queue_results.send(result);
                } break;
            case SERVER_TASK_TYPE_SLOT_RESTORE:
                {
                    int id_slot = task.data.at("id_slot");
                    server_slot * slot = get_slot_by_id(id_slot);
                    if (slot == nullptr) {
                        send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    if (slot->is_processing()) {
                        // if requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(task);
                        break;
                    }

                    const int64_t t_start = ggml_time_us();

                    std::string filename = task.data.at("filename");
                    std::string filepath = task.data.at("filepath");

                    slot->cache_tokens.resize(slot->n_ctx);
                    size_t token_count = 0;
                    size_t nread = llama_state_seq_load_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), slot->cache_tokens.size(), &token_count);
                    if (nread == 0) {
                        slot->cache_tokens.resize(0);
                        send_error(task, "Unable to restore slot, no available space in KV cache or invalid slot save file", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    slot->cache_tokens.resize(token_count);

                    const int64_t t_end = ggml_time_us();
                    const double t_restore_ms = (t_end - t_start) / 1000.0;

                    server_task_result result;
                    result.id    = task.id;
                    result.stop  = true;
                    result.error = false;
                    result.data  = json {
                        { "id_slot",    id_slot },
                        { "filename",   filename },
                        { "n_restored", token_count }, // tokens restored
                        { "n_read",     nread },       // bytes read
                        { "timings", {
                            { "restore_ms", t_restore_ms }
                        } }
                    };
                    queue_results.send(result);
                } break;
            case SERVER_TASK_TYPE_SLOT_ERASE:
                {
                    int id_slot = task.data.at("id_slot");
                    server_slot * slot = get_slot_by_id(id_slot);
                    if (slot == nullptr) {
                        send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                        break;
                    }
                    if (slot->is_processing()) {
                        // if requested slot is unavailable, we defer this task for processing later
                        SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                        queue_tasks.defer(task);
                        break;
                    }

                    // Erase token cache
                    const size_t n_erased = slot->cache_tokens.size();
                    llama_kv_cache_seq_rm(ctx, slot->id + 1, -1, -1);
                    slot->cache_tokens.clear();

                    server_task_result result;
                    result.id    = task.id;
                    result.stop  = true;
                    result.error = false;
                    result.data  = json {
                        { "id_slot",  id_slot },
                        { "n_erased", n_erased }
                    };
                    queue_results.send(result);
                } break;
            case SERVER_TASK_TYPE_SET_LORA:
                {
                    common_lora_adapters_apply(ctx, loras);
                    server_task_result result;
                    result.id    = task.id;
                    result.stop  = true;
                    result.error = false;
                    result.data  = json{{ "success", true }};
                    queue_results.send(result);
                } break;
        }
    }
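
    // one scheduler iteration:
    //   1. apply context shift to slots that are about to run out of context
    //   2. append one sampled token per slot that is currently generating
    //   3. fill the remaining batch capacity with pending prompt tokens
    //   4. decode in chunks of n_batch, then sample the next token for each
    //      slot that finished its batch view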
    void update_slots() {
        // check if all slots are idle
        {
            bool all_idle = true;
            for (auto & slot : slots) {
                if (slot.is_processing()) {
                    all_idle = false;
                    break;
                }
            }
            if (all_idle) {
                SRV_INF("%s", "all slots are idle\n");
                if (clean_kv_cache) {
                    kv_cache_clear();
                }
                return;
            }
        }

        {
            SRV_DBG("%s", "posting NEXT_RESPONSE\n");

            server_task task;
            task.type      = SERVER_TASK_TYPE_NEXT_RESPONSE;
            task.id_target = -1;

            queue_tasks.post(task);
        }

        // apply context-shift if needed
        // TODO: simplify and improve
        for (server_slot & slot : slots) {
            if (slot.is_processing() && slot.n_past + 1 >= slot.n_ctx) {
                if (!params.ctx_shift) {
                    // this check is redundant, but kept as a safety net:
                    // we should never get here, because generation should have already stopped in process_token()
                    slot.release();
                    send_error(slot, "context shift is disabled", ERROR_TYPE_SERVER);
                    continue;
                }
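
                // worked example (illustrative, ignoring add_bos_token):
                // n_past = 10, n_keep = 2 gives n_left = 8 and a default
                // n_discard = 4; KV cells [2, 6) are removed, [6, 10) shift
                // down by 4, and n_past becomes 6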
                // Shift context
                const int n_keep    = slot.params.n_keep + add_bos_token;
                const int n_left    = slot.n_past - n_keep;
                const int n_discard = slot.params.n_discard ? slot.params.n_discard : (n_left / 2);

                SLT_WRN(slot, "slot context shift, n_keep = %d, n_left = %d, n_discard = %d\n", n_keep, n_left, n_discard);

                llama_kv_cache_seq_rm (ctx, slot.id + 1, n_keep, n_keep + n_discard);
                llama_kv_cache_seq_add(ctx, slot.id + 1, n_keep + n_discard, slot.n_past, -n_discard);

                if (slot.params.cache_prompt) {
                    for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++) {
                        slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
                    }

                    slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);
                }

                slot.n_past -= n_discard;

                slot.truncated = true;
            }
        }
        // start populating the batch for this iteration
        common_batch_clear(batch);

        // first, add sampled tokens from any ongoing sequences
        for (auto & slot : slots) {
            if (slot.state != SLOT_STATE_GENERATING) {
                continue;
            }

            slot.i_batch = batch.n_tokens;

            common_batch_add(batch, slot.sampled, slot.n_past, { slot.id + 1 }, true);

            slot.n_past += 1;

            if (slot.params.cache_prompt) {
                slot.cache_tokens.push_back(slot.sampled);
            }

            SLT_DBG(slot, "slot decode token, n_ctx = %d, n_past = %d, n_cache_tokens = %d, truncated = %d\n",
                    slot.n_ctx, slot.n_past, (int) slot.cache_tokens.size(), slot.truncated);
        }
        // process in chunks of params.n_batch
        int32_t n_batch  = llama_n_batch(ctx);
        int32_t n_ubatch = llama_n_ubatch(ctx);

        // track if this is an embedding or non-embedding batch
        // if we've added sampled tokens above, we are in non-embedding mode
        // -1: none, 0: non-embedding, 1: embedding
        // TODO: make enum
        int32_t batch_type = batch.n_tokens > 0 ? 0 : -1;

        // next, batch any pending prompts without exceeding n_batch
        if (params.cont_batching || batch.n_tokens == 0) {
            for (auto & slot : slots) {
                // this slot still has a prompt to be processed
                if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_STARTED) {
                    auto & prompt_tokens = slot.prompt_tokens;

                    // TODO: maybe move branch to outside of this loop in the future
                    if (slot.state == SLOT_STATE_STARTED) {
                        slot.t_start_process_prompt = ggml_time_us();
                        slot.t_start_generation = 0;

                        slot.n_past = 0;
                        slot.n_prompt_tokens = prompt_tokens.size();
                        slot.state = SLOT_STATE_PROCESSING_PROMPT;

                        SLT_INF(slot, "new prompt, n_ctx_slot = %d, n_keep = %d, n_prompt_tokens = %d\n", slot.n_ctx, slot.params.n_keep, slot.n_prompt_tokens);

                        // print prompt tokens (for debugging)
                        if (1) {
                            // first 16 tokens (avoid flooding logs)
                            for (int i = 0; i < std::min<int>(16, prompt_tokens.size()); i++) {
                                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
                            }
                        } else {
                            // all
                            for (int i = 0; i < (int) prompt_tokens.size(); i++) {
                                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
                            }
                        }

                        // empty prompt passed -> release the slot and send empty response
                        if (prompt_tokens.empty()) {
                            SLT_WRN(slot, "%s", "empty prompt - releasing slot\n");

                            slot.release();
                            slot.print_timings();
                            send_final_response(slot);
                            continue;
                        }

                        if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING || slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
                            if (slot.n_prompt_tokens > n_ubatch) {
                                slot.release();
                                send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
                                continue;
                            }

                            if (slot.n_prompt_tokens > slot.n_ctx) {
                                slot.release();
                                send_error(slot, "input is larger than the max context size. skipping", ERROR_TYPE_SERVER);
                                continue;
                            }
                        } else {
                            if (!params.ctx_shift) {
                                // if context shift is disabled, we make sure prompt size is smaller than KV size
                                // TODO: there should be a separate parameter that controls prompt truncation
                                //       context shift should be applied only during the generation phase
                                if (slot.n_prompt_tokens >= slot.n_ctx) {
                                    slot.release();
                                    send_error(slot, "the request exceeds the available context size. try increasing the context size or enable context shift", ERROR_TYPE_INVALID_REQUEST);
                                    continue;
                                }
                            }

                            if (slot.params.n_keep < 0) {
                                slot.params.n_keep = slot.n_prompt_tokens;
                            }
                            slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);

                            // if input prompt is too big, truncate it
                            if (slot.n_prompt_tokens >= slot.n_ctx) {
                                const int n_left = slot.n_ctx - slot.params.n_keep;

                                const int n_block_size  = n_left / 2;
                                const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;

                                llama_tokens new_tokens(
                                        prompt_tokens.begin(),
                                        prompt_tokens.begin() + slot.params.n_keep);

                                new_tokens.insert(
                                        new_tokens.end(),
                                        prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
                                        prompt_tokens.end());

                                prompt_tokens = std::move(new_tokens);

                                slot.truncated = true;
                                slot.n_prompt_tokens = prompt_tokens.size();

                                SLT_WRN(slot, "input truncated, n_ctx = %d, n_keep = %d, n_left = %d, n_prompt_tokens = %d\n", slot.n_ctx, slot.params.n_keep, n_left, slot.n_prompt_tokens);

                                GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
                            }

                            if (slot.params.cache_prompt) {
                                // reuse any previously computed tokens that are common with the new prompt
                                slot.n_past = longest_common_prefix(slot.cache_tokens, prompt_tokens);

                                // reuse chunks from the cached prompt by shifting their KV cache in the new position
                                if (params.n_cache_reuse > 0) {
                                    size_t head_c = slot.n_past; // cache
                                    size_t head_p = slot.n_past; // current prompt

                                    SLT_DBG(slot, "trying to reuse chunks with size > %d, slot.n_past = %d\n", params.n_cache_reuse, slot.n_past);
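
                                    // illustrative: with n_cache_reuse = 256, any
                                    // run of >= 256 tokens shared between the cached
                                    // prompt and the new prompt (after the common
                                    // prefix) is kept by shifting its KV cells from
                                    // head_c to head_p instead of re-evaluating it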
                                    while (head_c < slot.cache_tokens.size() &&
                                           head_p < prompt_tokens.size()) {
                                        size_t n_match = 0;
                                        while (head_c + n_match < slot.cache_tokens.size() &&
                                               head_p + n_match < prompt_tokens.size() &&
                                               slot.cache_tokens[head_c + n_match] == prompt_tokens[head_p + n_match]) {
                                            n_match++;
                                        }

                                        if (n_match >= (size_t) params.n_cache_reuse) {
                                            SLT_INF(slot, "reusing chunk with size %zu, shifting KV cache [%zu, %zu) -> [%zu, %zu)\n", n_match, head_c, head_c + n_match, head_p, head_p + n_match);
                                            //for (size_t i = head_p; i < head_p + n_match; i++) {
                                            //    SLT_DBG(slot, "cache token %3zu: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
                                            //}

                                            const int64_t kv_shift = (int64_t) head_p - (int64_t) head_c;

                                            llama_kv_cache_seq_rm (ctx, slot.id + 1, head_p, head_c);
                                            llama_kv_cache_seq_add(ctx, slot.id + 1, head_c, -1, kv_shift);

                                            for (size_t i = 0; i < n_match; i++) {
                                                slot.cache_tokens[head_p + i] = slot.cache_tokens[head_c + i];
                                                slot.n_past++;
                                            }

                                            head_c += n_match;
                                            head_p += n_match;
                                        } else {
                                            head_c += 1;
                                        }
                                    }

                                    SLT_DBG(slot, "after context reuse, new slot.n_past = %d\n", slot.n_past);
                                }
                            }
                        }
                        if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0) {
                            // we have to evaluate at least 1 token to generate logits
                            SLT_WRN(slot, "need to evaluate at least 1 token to generate logits, n_past = %d, n_prompt_tokens = %d\n", slot.n_past, slot.n_prompt_tokens);
                            slot.n_past--;
                        }

                        slot.n_prompt_tokens_processed = 0;
                    }

                    // non-causal tasks require the entire prompt to fit in the physical batch
                    if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING || slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
                        // cannot fit the prompt in the current batch - will try next iter
                        if (batch.n_tokens + slot.n_prompt_tokens > n_batch) {
                            continue;
                        }
                    }

                    // check that we are in the right batch_type, if not defer the slot
                    const bool slot_type =
                        slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING ||
                        slot.inf_type == SERVER_TASK_INF_TYPE_RERANK ? 1 : 0;

                    if (batch_type == -1) {
                        batch_type = slot_type;
                    } else if (batch_type != slot_type) {
                        continue;
                    }

                    // keep only the common part
                    if (!llama_kv_cache_seq_rm(ctx, slot.id + 1, slot.n_past, -1)) {
                        // could not partially delete (likely using a non-Transformer model)
                        llama_kv_cache_seq_rm(ctx, slot.id + 1, -1, -1);

                        // there is no common part left
                        slot.n_past = 0;
                    }

                    SLT_INF(slot, "kv cache rm [%d, end)\n", slot.n_past);

                    // remove the non-common part from the cache
                    slot.cache_tokens.resize(slot.n_past);

                    // add prompt tokens for processing in the current batch
                    while (slot.n_past < slot.n_prompt_tokens && batch.n_tokens < n_batch) {
                        common_batch_add(batch, prompt_tokens[slot.n_past], slot.n_past, { slot.id + 1 }, false);
                        if (slot.params.cache_prompt) {
                            slot.cache_tokens.push_back(prompt_tokens[slot.n_past]);
                        }

                        slot.n_prompt_tokens_processed++;
                        slot.n_past++;
                    }

                    SLT_INF(slot, "prompt processing progress, n_past = %d, n_tokens = %d, progress = %f\n", slot.n_past, batch.n_tokens, (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens);

                    // entire prompt has been processed
                    if (slot.n_past == slot.n_prompt_tokens) {
                        slot.state = SLOT_STATE_DONE_PROMPT;

                        GGML_ASSERT(batch.n_tokens > 0);

                        common_sampler_reset(slot.smpl);

                        // Process all prompt tokens through sampler system
                        for (int i = 0; i < slot.n_prompt_tokens; ++i) {
                            common_sampler_accept(slot.smpl, prompt_tokens[i], false);
                        }

                        // extract the logits only for the last token
                        batch.logits[batch.n_tokens - 1] = true;

                        slot.n_decoded = 0;
                        slot.i_batch   = batch.n_tokens - 1;

                        SLT_INF(slot, "prompt done, n_past = %d, n_tokens = %d\n", slot.n_past, batch.n_tokens);
                    }
                }

                if (batch.n_tokens >= n_batch) {
                    break;
                }
            }
        }

        if (batch.n_tokens == 0) {
            SRV_WRN("%s", "no tokens to decode\n");
            return;
        }

        SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens);

        // make sure we're in the right embedding mode
        llama_set_embeddings(ctx, batch_type == 1);

        // process the created batch of tokens
        for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
            const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
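
            // a view into the shared batch buffers (no copies); the embd
            // pointer is left null because tokens are passed by id here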
            llama_batch batch_view = {
                n_tokens,
                batch.token    + i,
                nullptr,
                batch.pos      + i,
                batch.n_seq_id + i,
                batch.seq_id   + i,
                batch.logits   + i,
            };

            const int ret = llama_decode(ctx, batch_view);

            metrics.on_decoded(slots);

            if (ret != 0) {
                if (n_batch == 1 || ret < 0) {
                    // if you get here, it means the KV cache is full - try increasing it via the context size
                    SRV_ERR("failed to decode the batch: KV cache is full - try increasing it via the context size, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret);
                    for (auto & slot : slots) {
                        slot.release();
                        send_error(slot, "Input prompt is too big compared to KV size. Please try increasing KV size.");
                    }
                    break; // break loop of n_batch
                }

                // retry with half the batch size to try to find a free slot in the KV cache
                n_batch /= 2;
                i -= n_batch;

                SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size - try increasing it via the context size or enable defragmentation, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret);

                continue; // continue loop of n_batch
            }

            for (auto & slot : slots) {
                if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens)) {
                    continue; // continue loop of slots
                }

                if (slot.state == SLOT_STATE_DONE_PROMPT) {
                    if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING) {
                        // prompt evaluated for embedding
                        send_embedding(slot, batch_view);
                        slot.release();
                        slot.i_batch = -1;
                        continue; // continue loop of slots
                    }

                    if (slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
                        send_rerank(slot, batch_view);
                        slot.release();
                        slot.i_batch = -1;
                        continue; // continue loop of slots
                    }

                    // prompt evaluated for next-token prediction
                    slot.state = SLOT_STATE_GENERATING;
                } else if (slot.state != SLOT_STATE_GENERATING) {
                    continue; // continue loop of slots
                }

                completion_token_output result;
                const llama_token id = common_sampler_sample(slot.smpl, ctx, slot.i_batch - i);

                common_sampler_accept(slot.smpl, id, true);

                slot.n_decoded += 1;
                if (slot.n_decoded == 1) {
                    slot.t_start_generation  = ggml_time_us();
                    slot.t_prompt_processing = (slot.t_start_generation - slot.t_start_process_prompt) / 1e3;
                    metrics.on_prompt_eval(slot);
                }

                result.tok = id;

                const auto * cur_p = common_sampler_get_candidates(slot.smpl);

                // clamp to the number of available candidates: the previous code read
                // cur_p->data[i].id even when i >= cur_p->size, which is out of bounds
                for (size_t i = 0; i < std::min((size_t) slot.sparams.n_probs, cur_p->size); ++i) {
                    result.probs.push_back({
                        cur_p->data[i].id,
                        cur_p->data[i].p,
                    });
                }

                if (!process_token(result, slot)) {
                    // release slot because of stop condition
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    metrics.on_prediction(slot);
                }

                slot.i_batch = -1;
            }
        }

        SRV_DBG("%s", "run slots completed\n");
    }
    json model_meta() const {
        return json {
            {"vocab_type", llama_vocab_type (model)},
            {"n_vocab", llama_n_vocab (model)},
            {"n_ctx_train", llama_n_ctx_train (model)},
            {"n_embd", llama_n_embd (model)},
            {"n_params", llama_model_n_params(model)},
            {"size", llama_model_size (model)},
        };
    }
};

static void log_server_request(const httplib::Request & req, const httplib::Response & res) {
    // skip GH copilot requests when using default port
    if (req.path == "/v1/health" || req.path == "/v1/completions") {
        return;
    }

    LOG_INF("request: %s %s %s %d\n", req.method.c_str(), req.path.c_str(), req.remote_addr.c_str(), res.status);

    LOG_DBG("request: %s\n", req.body.c_str());
    LOG_DBG("response: %s\n", res.body.c_str());
}

std::function<void(int)> shutdown_handler;
std::atomic_flag is_terminating = ATOMIC_FLAG_INIT;

inline void signal_handler(int signal) {
    if (is_terminating.test_and_set()) {
        // in case it hangs, we can force terminate the server by hitting Ctrl+C twice
        // this is for better developer experience, we can remove when the server is stable enough
        fprintf(stderr, "Received second interrupt, terminating immediately.\n");
        exit(1);
    }

    shutdown_handler(signal);
}

int main(int argc, char ** argv) {
    // own arguments required by this example
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SERVER)) {
        return 1;
    }

    common_init();

    // enabling this will output extra debug information in the HTTP responses from the server
    // see format_final_response_oaicompat()
    const bool verbose = params.verbosity > 9;

    // struct that contains llama context and inference
    server_context ctx_server;

    if (params.model_alias == "unknown") {
        params.model_alias = params.model;
    }

    llama_backend_init();
    llama_numa_init(params.numa);

    LOG_INF("system info: n_threads = %d, n_threads_batch = %d, total_threads = %d\n", params.cpuparams.n_threads, params.cpuparams_batch.n_threads, std::thread::hardware_concurrency());
    LOG_INF("\n");
    LOG_INF("%s\n", common_params_get_system_info(params).c_str());
    LOG_INF("\n");

    std::unique_ptr<httplib::Server> svr;
#ifdef CPPHTTPLIB_OPENSSL_SUPPORT
    if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
        LOG_INF("Running with SSL: key = %s, cert = %s\n", params.ssl_file_key.c_str(), params.ssl_file_cert.c_str());
        svr.reset(
            new httplib::SSLServer(params.ssl_file_cert.c_str(), params.ssl_file_key.c_str())
        );
    } else {
        LOG_INF("Running without SSL\n");
        svr.reset(new httplib::Server());
    }
#else
    if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
        LOG_ERR("Server is built without SSL support\n");
        return 1;
    }
    svr.reset(new httplib::Server());
#endif

    std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};

    svr->set_default_headers({{"Server", "llama.cpp"}});

    // CORS preflight
    svr->Options(R"(.*)", [](const httplib::Request &, httplib::Response & res) {
        // Access-Control-Allow-Origin is already set by middleware
        res.set_header("Access-Control-Allow-Credentials", "true");
        res.set_header("Access-Control-Allow-Methods", "POST");
        res.set_header("Access-Control-Allow-Headers", "*");
        return res.set_content("", "text/html"); // blank response, no data
    });

    svr->set_logger(log_server_request);

    auto res_error = [](httplib::Response & res, const json & error_data) {
        json final_response {{"error", error_data}};
        res.set_content(final_response.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
        res.status = json_value(error_data, "code", 500);
    };

    auto res_ok = [](httplib::Response & res, const json & data) {
        res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
        res.status = 200;
    };

    svr->set_exception_handler([&res_error](const httplib::Request &, httplib::Response & res, std::exception_ptr ep) {
        std::string message;
        try {
            std::rethrow_exception(ep);
        } catch (std::exception & e) {
            message = e.what();
        } catch (...) {
            message = "Unknown Exception";
        }

        json formatted_error = format_error_response(message, ERROR_TYPE_SERVER);
        LOG_WRN("got exception: %s\n", formatted_error.dump().c_str());
        res_error(res, formatted_error);
    });

    svr->set_error_handler([&res_error](const httplib::Request &, httplib::Response & res) {
        if (res.status == 404) {
            res_error(res, format_error_response("File Not Found", ERROR_TYPE_NOT_FOUND));
        }
        // for other error codes, we skip processing here because it's already done by res_error()
    });

    // set timeouts and change hostname and port
    svr->set_read_timeout (params.timeout_read);
    svr->set_write_timeout(params.timeout_write);

    std::unordered_map<std::string, std::string> log_data;

    log_data["hostname"] = params.hostname;
    log_data["port"]     = std::to_string(params.port);

    if (params.api_keys.size() == 1) {
        auto key = params.api_keys[0];
        log_data["api_key"] = "api_key: ****" + key.substr(std::max((int)(key.length() - 4), 0));
    } else if (params.api_keys.size() > 1) {
        log_data["api_key"] = "api_key: " + std::to_string(params.api_keys.size()) + " keys loaded";
    }

    // necessary similarity of prompt for slot selection
    ctx_server.slot_prompt_similarity = params.slot_prompt_similarity;

    //
    // Middlewares
    //
    auto middleware_validate_api_key = [&params, &res_error](const httplib::Request & req, httplib::Response & res) {
        static const std::unordered_set<std::string> public_endpoints = {
            "/health",
            "/models",
            "/v1/models",
        };

        // If API key is not set, skip validation
        if (params.api_keys.empty()) {
            return true;
        }

        // If path is public, skip validation
        if (public_endpoints.find(req.path) != public_endpoints.end()) {
            return true;
        }

        // Check for API key in the header
        auto auth_header = req.get_header_value("Authorization");

        std::string prefix = "Bearer ";
        if (auth_header.substr(0, prefix.size()) == prefix) {
            std::string received_api_key = auth_header.substr(prefix.size());
            if (std::find(params.api_keys.begin(), params.api_keys.end(), received_api_key) != params.api_keys.end()) {
                return true; // API key is valid
            }
        }

        // API key is invalid or not provided
        res_error(res, format_error_response("Invalid API Key", ERROR_TYPE_AUTHENTICATION));

        LOG_WRN("Unauthorized: Invalid API Key\n");

        return false;
    };

    auto middleware_server_state = [&res_error, &state](const httplib::Request & req, httplib::Response & res) {
        server_state current_state = state.load();
        if (current_state == SERVER_STATE_LOADING_MODEL) {
            auto tmp = string_split<std::string>(req.path, '.');
            if (req.path == "/" || tmp.back() == "html") {
                res.set_content(reinterpret_cast<const char*>(loading_html), loading_html_len, "text/html; charset=utf-8");
                res.status = 503;
            } else {
                res_error(res, format_error_response("Loading model", ERROR_TYPE_UNAVAILABLE));
            }
            return false;
        }
        return true;
    };

    // register server middlewares
    svr->set_pre_routing_handler([&middleware_validate_api_key, &middleware_server_state](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        if (!middleware_server_state(req, res)) {
            return httplib::Server::HandlerResponse::Handled;
        }
        if (!middleware_validate_api_key(req, res)) {
            return httplib::Server::HandlerResponse::Handled;
        }
        return httplib::Server::HandlerResponse::Unhandled;
    });

    //
    // Route handlers (or controllers)
    //
    const auto handle_health = [&](const httplib::Request &, httplib::Response & res) {
        // error and loading states are handled by middleware
        json health = {{"status", "ok"}};
        res_ok(res, health);
    };

    const auto handle_slots = [&](const httplib::Request & req, httplib::Response & res) {
        if (!params.endpoint_slots) {
            res_error(res, format_error_response("This server does not support slots endpoint. Start it with `--slots`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        // request slots data using task queue
        server_task task;
        task.id   = ctx_server.queue_tasks.get_new_id();
        task.type = SERVER_TASK_TYPE_METRICS;

        ctx_server.queue_results.add_waiting_task_id(task.id);
        ctx_server.queue_tasks.post(task, true); // high-priority task

        // get the result
        server_task_result result = ctx_server.queue_results.recv(task.id);
        ctx_server.queue_results.remove_waiting_task_id(task.id);

        // optionally return "fail_on_no_slot" error
        const int n_idle_slots = result.data.at("idle");
        if (req.has_param("fail_on_no_slot")) {
            if (n_idle_slots == 0) {
                res_error(res, format_error_response("no slot available", ERROR_TYPE_UNAVAILABLE));
                return;
            }
        }

        res_ok(res, result.data.at("slots"));
    };
    const auto handle_metrics = [&](const httplib::Request &, httplib::Response & res) {
        if (!params.endpoint_metrics) {
            res_error(res, format_error_response("This server does not support metrics endpoint. Start it with `--metrics`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        // request slots data using task queue
        server_task task;
        task.id        = ctx_server.queue_tasks.get_new_id();
        task.id_target = -1;
        task.type      = SERVER_TASK_TYPE_METRICS;
        task.data.push_back({{"reset_bucket", true}});

        ctx_server.queue_results.add_waiting_task_id(task.id);
        ctx_server.queue_tasks.post(task, true); // high-priority task

        // get the result
        server_task_result result = ctx_server.queue_results.recv(task.id);
        ctx_server.queue_results.remove_waiting_task_id(task.id);

        json data = result.data;

        const uint64_t n_prompt_tokens_processed = data.at("n_prompt_tokens_processed");
        const uint64_t t_prompt_processing       = data.at("t_prompt_processing");

        const uint64_t n_tokens_predicted  = data.at("n_tokens_predicted");
        const uint64_t t_tokens_generation = data.at("t_tokens_generation");

        const uint64_t n_decode_total     = data.at("n_decode_total");
        const uint64_t n_busy_slots_total = data.at("n_busy_slots_total");

        const int32_t kv_cache_used_cells = data.at("kv_cache_used_cells");

        // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
        json all_metrics_def = json {
            {"counter", {{
                {"name", "prompt_tokens_total"},
                {"help", "Number of prompt tokens processed."},
                {"value", (uint64_t) data.at("n_prompt_tokens_processed_total")}
            }, {
                {"name", "prompt_seconds_total"},
                {"help", "Prompt process time"},
                {"value", (uint64_t) data.at("t_prompt_processing_total") / 1.e3}
            }, {
                {"name", "tokens_predicted_total"},
                {"help", "Number of generation tokens processed."},
                {"value", (uint64_t) data.at("n_tokens_predicted_total")}
            }, {
                {"name", "tokens_predicted_seconds_total"},
                {"help", "Predict process time"},
                {"value", (uint64_t) data.at("t_tokens_generation_total") / 1.e3}
            }, {
                {"name", "n_decode_total"},
                {"help", "Total number of llama_decode() calls"},
                {"value", n_decode_total}
            }, {
                {"name", "n_busy_slots_per_decode"},
                {"help", "Average number of busy slots per llama_decode() call"},
                {"value", (float) n_busy_slots_total / (float) n_decode_total}
            }}},
            {"gauge", {{
                {"name", "prompt_tokens_seconds"},
                {"help", "Average prompt throughput in tokens/s."},
                {"value", n_prompt_tokens_processed ? 1.e3 / t_prompt_processing * n_prompt_tokens_processed : 0.}
            }, {
                {"name", "predicted_tokens_seconds"},
                {"help", "Average generation throughput in tokens/s."},
                {"value", n_tokens_predicted ? 1.e3 / t_tokens_generation * n_tokens_predicted : 0.}
            }, {
                {"name", "kv_cache_usage_ratio"},
                {"help", "KV-cache usage. 1 means 100 percent usage."},
                {"value", 1. * kv_cache_used_cells / params.n_ctx}
            }, {
                {"name", "kv_cache_tokens"},
                {"help", "KV-cache tokens."},
                {"value", (uint64_t) data.at("kv_cache_tokens_count")}
            }, {
                {"name", "requests_processing"},
                {"help", "Number of requests being processed."},
                {"value", (uint64_t) data.at("processing")}
            }, {
                {"name", "requests_deferred"},
                {"help", "Number of requests deferred."},
                {"value", (uint64_t) data.at("deferred")}
            }}}
        };

        std::stringstream prometheus;

        for (const auto & el : all_metrics_def.items()) {
            const auto & type = el.key();
            const auto & metrics_def = el.value();

            for (const auto & metric_def : metrics_def) {
                const std::string name = metric_def.at("name");
                const std::string help = metric_def.at("help");

                auto value = json_value(metric_def, "value", 0.);
                prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
                           << "# TYPE llamacpp:" << name << " " << type << "\n"
                           << "llamacpp:" << name << " " << value << "\n";
            }
        }

        const int64_t t_start = data.at("t_start");
        res.set_header("Process-Start-Time-Unix", std::to_string(t_start));

        res.set_content(prometheus.str(), "text/plain; version=0.0.4");
        res.status = 200; // HTTP OK
    };
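
    // example scrape output (illustrative):
    //   # HELP llamacpp:prompt_tokens_total Number of prompt tokens processed.
    //   # TYPE llamacpp:prompt_tokens_total counter
    //   llamacpp:prompt_tokens_total 1024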
    const auto handle_slots_save = [&ctx_server, &res_error, &res_ok, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
        json request_data = json::parse(req.body);
        std::string filename = request_data.at("filename");
        if (!fs_validate_filename(filename)) {
            res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        std::string filepath = params.slot_save_path + filename;

        server_task task;
        task.type = SERVER_TASK_TYPE_SLOT_SAVE;
        task.data = {
            { "id_slot", id_slot },
            { "filename", filename },
            { "filepath", filepath },
        };

        const int id_task = ctx_server.queue_tasks.post(task);
        ctx_server.queue_results.add_waiting_task_id(id_task);

        server_task_result result = ctx_server.queue_results.recv(id_task);
        ctx_server.queue_results.remove_waiting_task_id(id_task);

        if (result.error) {
            res_error(res, result.data);
        } else {
            res_ok(res, result.data);
        }
    };

    const auto handle_slots_restore = [&ctx_server, &res_error, &res_ok, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
        json request_data = json::parse(req.body);
        std::string filename = request_data.at("filename");
        if (!fs_validate_filename(filename)) {
            res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        std::string filepath = params.slot_save_path + filename;

        server_task task;
        task.type = SERVER_TASK_TYPE_SLOT_RESTORE;
        task.data = {
            { "id_slot", id_slot },
            { "filename", filename },
            { "filepath", filepath },
        };

        const int id_task = ctx_server.queue_tasks.post(task);
        ctx_server.queue_results.add_waiting_task_id(id_task);

        server_task_result result = ctx_server.queue_results.recv(id_task);
        ctx_server.queue_results.remove_waiting_task_id(id_task);

        if (result.error) {
            res_error(res, result.data);
        } else {
            res_ok(res, result.data);
        }
    };

    const auto handle_slots_erase = [&ctx_server, &res_error, &res_ok](const httplib::Request & /* req */, httplib::Response & res, int id_slot) {
        server_task task;
        task.type = SERVER_TASK_TYPE_SLOT_ERASE;
        task.data = {
            { "id_slot", id_slot },
        };

        const int id_task = ctx_server.queue_tasks.post(task);
        ctx_server.queue_results.add_waiting_task_id(id_task);

        server_task_result result = ctx_server.queue_results.recv(id_task);
        ctx_server.queue_results.remove_waiting_task_id(id_task);

        if (result.error) {
            res_error(res, result.data);
        } else {
            res_ok(res, result.data);
        }
    };
    const auto handle_slots_action = [&params, &res_error, &handle_slots_save, &handle_slots_restore, &handle_slots_erase](const httplib::Request & req, httplib::Response & res) {
        if (params.slot_save_path.empty()) {
            res_error(res, format_error_response("This server does not support slots action. Start it with `--slot-save-path`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        std::string id_slot_str = req.path_params.at("id_slot");
        int id_slot;

        try {
            id_slot = std::stoi(id_slot_str);
        } catch (const std::exception &) {
            res_error(res, format_error_response("Invalid slot ID", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        std::string action = req.get_param_value("action");

        if (action == "save") {
            handle_slots_save(req, res, id_slot);
        } else if (action == "restore") {
            handle_slots_restore(req, res, id_slot);
        } else if (action == "erase") {
            handle_slots_erase(req, res, id_slot);
        } else {
            res_error(res, format_error_response("Invalid action", ERROR_TYPE_INVALID_REQUEST));
        }
    };
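
    // illustrative request (assuming the route is registered with an
    // ":id_slot" path parameter, e.g. /slots/:id_slot):
    //   POST /slots/0?action=save  with body {"filename": "slot0.bin"}
    // the file is written under params.slot_save_path by the SLOT_SAVE task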
    const auto handle_props = [&ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
        json data = {
            { "default_generation_settings", ctx_server.default_generation_settings_for_props },
            { "total_slots", ctx_server.params.n_parallel },
            { "chat_template", llama_get_chat_template(ctx_server.model) },
        };

        res_ok(res, data);
    };

    const auto handle_props_change = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
        if (!ctx_server.params.endpoint_props) {
            res_error(res, format_error_response("This server does not support changing global properties. Start it with `--props`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        json data = json::parse(req.body);

        // update any props here

        res_ok(res, {{ "success", true }});
    };

    const auto handle_completions_generic = [&ctx_server, &res_error, &res_ok](server_task_inf_type inf_type, json & data, httplib::Response & res) {
        if (ctx_server.params.embedding) {
            res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        std::vector<server_task> tasks = ctx_server.create_tasks_inference(data, inf_type);
        ctx_server.queue_results.add_waiting_tasks(tasks);
        ctx_server.queue_tasks.post(tasks);

        bool stream = json_value(data, "stream", false);
        const auto task_ids = server_task::get_list_id(tasks);

        if (!stream) {
            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
                if (results.size() == 1) {
                    // single result
                    res_ok(res, results[0].data);
                } else {
                    // multiple results (multitask)
                    json arr = json::array();
                    for (const auto & res : results) {
                        arr.push_back(res.data);
                    }
                    res_ok(res, arr);
                }
            }, [&](const json & error_data) {
                res_error(res, error_data);
            });

            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        } else {
            const auto chunked_content_provider = [task_ids, &ctx_server](size_t, httplib::DataSink & sink) {
                ctx_server.receive_cmpl_results_stream(task_ids, [&](const server_task_result & result) -> bool {
                    return server_sent_event(sink, "data", result.data);
                }, [&](const json & error_data) {
                    server_sent_event(sink, "error", error_data);
                });
                sink.done();
                return false;
            };

            auto on_complete = [task_ids, &ctx_server] (bool) {
                ctx_server.queue_results.remove_waiting_task_ids(task_ids);
            };

            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    };
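
    // note: in the streaming branch above, the HTTP thread stays inside the
    // chunked content provider until every task reports stop, an error
    // arrives, or the client disconnects (server_sent_event() returns false)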
    const auto handle_completions = [&handle_completions_generic](const httplib::Request & req, httplib::Response & res) {
        json data = json::parse(req.body);
        return handle_completions_generic(SERVER_TASK_INF_TYPE_COMPLETION, data, res);
    };

    const auto handle_infill = [&ctx_server, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) {
        // check model compatibility
        std::string err;
        if (llama_token_fim_pre(ctx_server.model) == LLAMA_TOKEN_NULL) {
            err += "prefix token is missing. ";
        }
        if (llama_token_fim_suf(ctx_server.model) == LLAMA_TOKEN_NULL) {
            err += "suffix token is missing. ";
        }
        if (llama_token_fim_mid(ctx_server.model) == LLAMA_TOKEN_NULL) {
            err += "middle token is missing. ";
        }
        if (!err.empty()) {
            res_error(res, format_error_response(string_format("Infill is not supported by this model: %s", err.c_str()), ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        json data = json::parse(req.body);

        // validate input
        if (!data.contains("input_prefix")) {
            res_error(res, format_error_response("\"input_prefix\" is required", ERROR_TYPE_INVALID_REQUEST));
            return; // bail out early; without this, processing would continue after reporting the error
        }

        if (!data.contains("input_suffix")) {
            res_error(res, format_error_response("\"input_suffix\" is required", ERROR_TYPE_INVALID_REQUEST));
            return; // same early-exit as above
        }

        if (data.contains("input_extra") && !data.at("input_extra").is_array()) {
            res_error(res, format_error_response("\"input_extra\" must be an array of {\"filename\": string, \"text\": string}", ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        json input_extra = json_value(data, "input_extra", json::array());
        for (const auto & chunk : input_extra) {
            // { "text": string, "filename": string }
            if (!chunk.contains("text") || !chunk.at("text").is_string()) {
                res_error(res, format_error_response("extra_context chunk must contain a \"text\" field with a string value", ERROR_TYPE_INVALID_REQUEST));
                return;
            }
            // filename is optional
            if (chunk.contains("filename") && !chunk.at("filename").is_string()) {
                res_error(res, format_error_response("extra_context chunk's \"filename\" field must be a string", ERROR_TYPE_INVALID_REQUEST));
                return;
            }
        }
        data["input_extra"] = input_extra; // default to an empty array if it does not exist

        return handle_completions_generic(SERVER_TASK_INF_TYPE_INFILL, data, res);
    };
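
    // illustrative /infill request body (field values are made up):
    //   {"input_prefix": "def add(a, b):\n    ", "input_suffix": "\n", "input_extra": []}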
    // TODO: maybe merge this function with "handle_completions_generic"
    const auto handle_chat_completions = [&ctx_server, &params, &res_error, &res_ok, verbose](const httplib::Request & req, httplib::Response & res) {
        if (ctx_server.params.embedding) {
            res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template);

        std::vector<server_task> tasks = ctx_server.create_tasks_inference(data, SERVER_TASK_INF_TYPE_COMPLETION);
        ctx_server.queue_results.add_waiting_tasks(tasks);
        ctx_server.queue_tasks.post(tasks);

        bool stream = json_value(data, "stream", false);
        const auto task_ids      = server_task::get_list_id(tasks);
        const auto completion_id = gen_chatcmplid();

        if (!stream) {
            ctx_server.receive_cmpl_results(task_ids, [&](const std::vector<server_task_result> & results) {
                // multitasking is never supported in chat completion; there is only one result
                json result_oai = format_final_response_oaicompat(data, results[0].data, completion_id, /*.streaming =*/ false, verbose);
                res_ok(res, result_oai);
            }, [&](const json & error_data) {
                res_error(res, error_data);
            });
            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        } else {
            const auto chunked_content_provider = [task_ids, &ctx_server, completion_id](size_t, httplib::DataSink & sink) {
                ctx_server.receive_cmpl_results_stream(task_ids, [&](const server_task_result & result) -> bool {
                    std::vector<json> result_array = format_partial_response_oaicompat(result.data, completion_id);
                    for (auto & event_data : result_array) {
                        if (event_data.empty()) {
                            continue; // skip the stop token
                        }
                        if (!server_sent_event(sink, "data", event_data)) {
                            return false; // connection is closed
                        }
                    }
                    return true; // ok
                }, [&](const json & error_data) {
                    server_sent_event(sink, "error", error_data);
                });
                static const std::string ev_done = "data: [DONE]\n\n";
                sink.write(ev_done.data(), ev_done.size());
                sink.done();
                return true;
            };
            auto on_complete = [task_ids, &ctx_server](bool) {
                ctx_server.queue_results.remove_waiting_task_ids(task_ids);
            };
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    };
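
    // Illustrative request against the OpenAI-compatible route registered
    // below (a sketch; the payload shape follows the OpenAI chat API that
    // oaicompat_completion_params_parse() accepts, host/port are assumptions):
    //   curl http://localhost:8080/v1/chat/completions -d '{
    //     "messages": [{"role": "user", "content": "Hello"}],
    //     "stream": true
    //   }'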
    const auto handle_models = [&params, &ctx_server](const httplib::Request &, httplib::Response & res) {
        json models = {
            {"object", "list"},
            {"data", {
                {
                    {"id",       params.model_alias},
                    {"object",   "model"},
                    {"created",  std::time(0)},
                    {"owned_by", "llamacpp"},
                    {"meta",     ctx_server.model_meta()}
                },
            }}
        };
        res.set_content(models.dump(), MIMETYPE_JSON);
    };
    const auto handle_tokenize = [&ctx_server, &res_ok](const httplib::Request & req, httplib::Response & res) {
        const json body = json::parse(req.body);

        json tokens_response = json::array();
        if (body.count("content") != 0) {
            const bool add_special = json_value(body, "add_special", false);
            const bool with_pieces = json_value(body, "with_pieces", false);

            llama_tokens tokens = tokenize_mixed(ctx_server.ctx, body.at("content"), add_special, true);

            if (with_pieces) {
                for (const auto & token : tokens) {
                    std::string piece = common_token_to_piece(ctx_server.ctx, token);
                    json piece_json;

                    // check if the piece is valid UTF-8
                    if (is_valid_utf8(piece)) {
                        piece_json = piece;
                    } else {
                        // if not valid UTF-8, store as an array of byte values
                        piece_json = json::array();
                        for (unsigned char c : piece) {
                            piece_json.push_back(static_cast<int>(c));
                        }
                    }

                    tokens_response.push_back({
                        {"id",    token},
                        {"piece", piece_json}
                    });
                }
            } else {
                tokens_response = tokens;
            }
        }

        const json data = format_tokenizer_response(tokens_response);
        res_ok(res, data);
    };
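
    // Illustrative /tokenize exchange (a sketch; the response shape follows
    // the code above, the token ids are made up):
    //   request : {"content": "Hello world", "with_pieces": true}
    //   response: {"tokens": [{"id": 12345, "piece": "Hello"}, ...]}
    // pieces that are not valid UTF-8 come back as arrays of byte values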
    const auto handle_detokenize = [&ctx_server, &res_ok](const httplib::Request & req, httplib::Response & res) {
        const json body = json::parse(req.body);

        std::string content;
        if (body.count("tokens") != 0) {
            const llama_tokens tokens = body.at("tokens");
            content = tokens_to_str(ctx_server.ctx, tokens.cbegin(), tokens.cend());
        }

        const json data = format_detokenized_response(content);
        res_ok(res, data);
    };
    const auto handle_embeddings = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
        const json body = json::parse(req.body);
        bool is_openai = false;

        // an input prompt can be a string or a list of tokens (integer)
        json prompt;
        if (body.count("input") != 0) {
            is_openai = true;
            prompt = body.at("input");
        } else if (body.count("content") != 0) {
            // with "content", we only support a single prompt
            prompt = std::vector<std::string>{body.at("content")};
        } else {
            res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        // create and queue the task
        json responses = json::array();
        bool error = false;
        {
            std::vector<server_task> tasks = ctx_server.create_tasks_inference({{"prompt", prompt}}, SERVER_TASK_INF_TYPE_EMBEDDING);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(tasks);

            // get the result
            std::unordered_set<int> task_ids = server_task::get_list_id(tasks);

            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
                for (const auto & res : results) {
                    responses.push_back(res.data);
                }
            }, [&](const json & error_data) {
                res_error(res, error_data);
                error = true;
            });

            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        }

        if (error) {
            return;
        }

        // write JSON response
        json root = is_openai
            ? format_embeddings_response_oaicompat(body, responses)
            : responses[0];
        res_ok(res, root);
    };
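
    // Illustrative /embeddings payloads (a sketch based on the two branches
    // above): the OpenAI-style form selects the OAI-compatible response,
    //   {"input": ["first text", "second text"]}
    // while the legacy single-prompt form returns the raw first result,
    //   {"content": "some text"}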
    const auto handle_rerank = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
        if (!ctx_server.params.reranking || ctx_server.params.embedding) {
            res_error(res, format_error_response("This server does not support reranking. Start it with `--reranking` and without `--embedding`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        const json body = json::parse(req.body);

        // TODO: implement "top_n" (read it only when present)
        //int top_n = 1;
        //if (body.count("top_n") == 1) {
        //    top_n = body.at("top_n");
        //}

        json query;
        if (body.count("query") == 1) {
            query = body.at("query");
            if (!query.is_string()) {
                res_error(res, format_error_response("\"query\" must be a string", ERROR_TYPE_INVALID_REQUEST));
                return;
            }
        } else {
            res_error(res, format_error_response("\"query\" must be provided", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        std::vector<std::string> documents = json_value(body, "documents", std::vector<std::string>());
        if (documents.empty()) {
            res_error(res, format_error_response("\"documents\" must be a non-empty string array", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        // construct the prompt object: array of ["query", "doc0", "doc1", ...]
        json prompt;
        prompt.push_back(query);
        for (const auto & doc : documents) {
            prompt.push_back(doc);
        }

        LOG_DBG("rerank prompt: %s\n", prompt.dump().c_str());

        // create and queue the task
        json responses = json::array();
        bool error = false;
        {
            std::vector<server_task> tasks = ctx_server.create_tasks_inference({{"prompt", prompt}}, SERVER_TASK_INF_TYPE_RERANK);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(tasks);

            // get the result
            std::unordered_set<int> task_ids = server_task::get_list_id(tasks);

            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
                for (const auto & res : results) {
                    responses.push_back(res.data);
                }
            }, [&](const json & error_data) {
                res_error(res, error_data);
                error = true;
            });

            // match handle_embeddings: stop waiting on these task ids so they do not leak
            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        }

        if (error) {
            return;
        }

        // write JSON response
        json root = format_response_rerank(body, responses);
        res_ok(res, root);
    };
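
    // Illustrative /rerank request (a sketch; the field names match the
    // validation above, the strings are made up):
    //   {"query": "what is a panda?", "documents": ["hello", "the giant panda is a bear"]}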
    const auto handle_lora_adapters_list = [&](const httplib::Request &, httplib::Response & res) {
        json result = json::array();
        for (size_t i = 0; i < ctx_server.loras.size(); ++i) {
            auto & lora = ctx_server.loras[i];
            result.push_back({
                {"id",    i},
                {"path",  lora.path},
                {"scale", lora.scale},
            });
        }
        res_ok(res, result);
        res.status = 200; // HTTP OK
    };
    const auto handle_lora_adapters_apply = [&](const httplib::Request & req, httplib::Response & res) {
        const std::vector<json> body = json::parse(req.body);
        const int max_idx = (int) ctx_server.loras.size();

        // reset all adapter scales before applying the new values
        for (auto & lora : ctx_server.loras) {
            lora.scale = 0.0f;
        }

        // set the requested scales
        for (const auto & entry : body) {
            int   id    = entry.at("id");
            float scale = entry.at("scale");
            if (0 <= id && id < max_idx) {
                ctx_server.loras[id].scale = scale;
            } else {
                throw std::runtime_error("invalid adapter id");
            }
        }

        server_task task;
        task.type = SERVER_TASK_TYPE_SET_LORA;
        const int id_task = ctx_server.queue_tasks.post(task);
        ctx_server.queue_results.add_waiting_task_id(id_task);

        server_task_result result = ctx_server.queue_results.recv(id_task);
        ctx_server.queue_results.remove_waiting_task_id(id_task);

        res_ok(res, result.data);
        res.status = 200; // HTTP OK
    };
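
    // Illustrative POST /lora-adapters body (a sketch; ids index into
    // ctx_server.loras, and adapters omitted from the list keep the 0.0
    // scale set by the reset above):
    //   [{"id": 0, "scale": 0.5}, {"id": 1, "scale": 1.0}]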
    auto handle_static_file = [](unsigned char * content, size_t len, const char * mime_type) {
        return [content, len, mime_type](const httplib::Request &, httplib::Response & res) {
            res.set_content(reinterpret_cast<const char *>(content), len, mime_type);
            return false;
        };
    };
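
    // note: the content/len pairs passed to handle_static_file below
    // (index_html/index_html_len, ...) are assumed to be byte arrays
    // embedded into the binary at build time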
    //
    // Router
    //

    // register static assets routes
    if (!params.public_path.empty()) {
        // set the base directory for serving static files
        svr->set_base_dir(params.public_path);
    }

    if (!params.api_keys.empty()) {
        // for now, if an API key is set, the web UI is unusable
        svr->Get("/", [&](const httplib::Request &, httplib::Response & res) {
            return res.set_content("Web UI is disabled because API key is set.", "text/html; charset=utf-8");
        });
    } else {
        // using embedded static files
        svr->Get("/",                           handle_static_file(index_html,    index_html_len,    "text/html; charset=utf-8"));
        svr->Get("/index.js",                   handle_static_file(index_js,      index_js_len,      "text/javascript; charset=utf-8"));
        svr->Get("/completion.js",              handle_static_file(completion_js, completion_js_len, "text/javascript; charset=utf-8"));
        svr->Get("/json-schema-to-grammar.mjs", handle_static_file(json_schema_to_grammar_mjs, json_schema_to_grammar_mjs_len, "text/javascript; charset=utf-8"));

        // add new-ui files
        svr->Get("/colorthemes.css",       handle_static_file(colorthemes_css,       colorthemes_css_len,       "text/css; charset=utf-8"));
        svr->Get("/style.css",             handle_static_file(style_css,             style_css_len,             "text/css; charset=utf-8"));
        svr->Get("/theme-beeninorder.css", handle_static_file(theme_beeninorder_css, theme_beeninorder_css_len, "text/css; charset=utf-8"));
        svr->Get("/theme-ketivah.css",     handle_static_file(theme_ketivah_css,     theme_ketivah_css_len,     "text/css; charset=utf-8"));
        svr->Get("/theme-mangotango.css",  handle_static_file(theme_mangotango_css,  theme_mangotango_css_len,  "text/css; charset=utf-8"));
        svr->Get("/theme-playground.css",  handle_static_file(theme_playground_css,  theme_playground_css_len,  "text/css; charset=utf-8"));
        svr->Get("/theme-polarnight.css",  handle_static_file(theme_polarnight_css,  theme_polarnight_css_len,  "text/css; charset=utf-8"));
        svr->Get("/theme-snowstorm.css",   handle_static_file(theme_snowstorm_css,   theme_snowstorm_css_len,   "text/css; charset=utf-8"));
        svr->Get("/index-new.html",        handle_static_file(index_new_html,        index_new_html_len,        "text/html; charset=utf-8"));
        svr->Get("/system-prompts.js",     handle_static_file(system_prompts_js,     system_prompts_js_len,     "text/javascript; charset=utf-8"));
        svr->Get("/prompt-formats.js",     handle_static_file(prompt_formats_js,     prompt_formats_js_len,     "text/javascript; charset=utf-8"));
    }
    // register API routes
    svr->Get ("/health",              handle_health); // public endpoint (no API key check)
    svr->Get ("/metrics",             handle_metrics);
    svr->Get ("/props",               handle_props);
    svr->Post("/props",               handle_props_change);
    svr->Get ("/models",              handle_models); // public endpoint (no API key check)
    svr->Get ("/v1/models",           handle_models); // public endpoint (no API key check)
    svr->Post("/completion",          handle_completions); // legacy
    svr->Post("/completions",         handle_completions);
    svr->Post("/v1/completions",      handle_completions);
    svr->Post("/chat/completions",    handle_chat_completions);
    svr->Post("/v1/chat/completions", handle_chat_completions);
    svr->Post("/infill",              handle_infill);
    svr->Post("/embedding",           handle_embeddings); // legacy
    svr->Post("/embeddings",          handle_embeddings);
    svr->Post("/v1/embeddings",       handle_embeddings);
    svr->Post("/rerank",              handle_rerank);
    svr->Post("/reranking",           handle_rerank);
    svr->Post("/v1/rerank",           handle_rerank);
    svr->Post("/v1/reranking",        handle_rerank);
    svr->Post("/tokenize",            handle_tokenize);
    svr->Post("/detokenize",          handle_detokenize);
    // LoRA adapters hotswap
    svr->Get ("/lora-adapters",       handle_lora_adapters_list);
    svr->Post("/lora-adapters",       handle_lora_adapters_apply);
    // Save & load slots
    svr->Get ("/slots",               handle_slots);
    svr->Post("/slots/:id_slot",      handle_slots_action);
    //
    // Start the server
    //
    if (params.n_threads_http < 1) {
        // +2 threads for monitoring endpoints
        params.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
    }
    log_data["n_threads_http"] = std::to_string(params.n_threads_http);
    svr->new_task_queue = [&params] { return new httplib::ThreadPool(params.n_threads_http); };
    // clean up function, to be called before exit
    auto clean_up = [&svr]() {
        svr->stop();
        llama_backend_free();
    };

    // bind the HTTP listen port and run the HTTP server in a thread
    if (!svr->bind_to_port(params.hostname, params.port)) {
        LOG_ERR("%s: couldn't bind HTTP server socket, hostname: %s, port: %d\n", __func__, params.hostname.c_str(), params.port);
        clean_up();
        return 1;
    }
    std::thread t([&]() { svr->listen_after_bind(); });
    svr->wait_until_ready();

    LOG_INF("%s: HTTP server is listening, hostname: %s, port: %d, http threads: %d\n", __func__, params.hostname.c_str(), params.port, params.n_threads_http);

    // load the model
    LOG_INF("%s: loading model\n", __func__);
    if (!ctx_server.load_model(params)) {
        clean_up();
        t.join();
        LOG_ERR("%s: exiting due to model loading error\n", __func__);
        return 1;
    }

    ctx_server.init();
    state.store(SERVER_STATE_READY);

    LOG_INF("%s: model loaded\n", __func__);

    // if a custom chat template is not supplied, use the one that comes with the model (if any)
    if (params.chat_template.empty()) {
        if (!ctx_server.validate_model_chat_template()) {
            LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
            params.chat_template = "chatml";
        }
    }

    // print a sample chat example to make it clear which template is used
    LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), common_chat_format_example(ctx_server.model, params.chat_template).c_str());

    ctx_server.queue_tasks.on_new_task(std::bind(
        &server_context::process_single_task, &ctx_server, std::placeholders::_1));
    ctx_server.queue_tasks.on_update_slots(std::bind(
        &server_context::update_slots, &ctx_server));
    shutdown_handler = [&](int) {
        ctx_server.queue_tasks.terminate();
    };

    // install the signal handlers before entering the blocking main loop,
    // otherwise SIGINT/SIGTERM could not trigger a graceful shutdown
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset(&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT,  &sigint_action, NULL);
    sigaction(SIGTERM, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

    LOG_INF("%s: server is listening on http://%s:%d - starting the main loop\n", __func__, params.hostname.c_str(), params.port);

    // blocks until the shutdown handler terminates the task queue
    ctx_server.queue_tasks.start_loop();

    clean_up();
    t.join();

    return 0;
}