server.cpp

  1. #include "utils.hpp"
  2. #include "common.h"
  3. #include "json-schema-to-grammar.h"
  4. #include "llama.h"
  5. #include "grammar-parser.h"
  6. #ifndef NDEBUG
  7. // crash the server in debug mode, otherwise send an HTTP 500 error
  8. #define CPPHTTPLIB_NO_EXCEPTIONS 1
  9. #endif
  10. // increase max payload length to allow use of larger context size
  11. #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
  12. #include "httplib.h"
  13. // Change JSON_ASSERT from assert() to GGML_ASSERT:
  14. #define JSON_ASSERT GGML_ASSERT
  15. #include "json.hpp"
  16. // auto generated files (update with ./deps.sh)
  17. #include "colorthemes.css.hpp"
  18. #include "style.css.hpp"
  19. #include "theme-beeninorder.css.hpp"
  20. #include "theme-ketivah.css.hpp"
  21. #include "theme-mangotango.css.hpp"
  22. #include "theme-playground.css.hpp"
  23. #include "theme-polarnight.css.hpp"
  24. #include "theme-snowstorm.css.hpp"
  25. #include "index.html.hpp"
  26. #include "index-new.html.hpp"
  27. #include "index.js.hpp"
  28. #include "completion.js.hpp"
  29. #include "system-prompts.js.hpp"
  30. #include "prompt-formats.js.hpp"
  31. #include "json-schema-to-grammar.mjs.hpp"
  32. #include <atomic>
  33. #include <chrono>
  34. #include <condition_variable>
  35. #include <cstddef>
  36. #include <set>
  37. #include <mutex>
  38. #include <thread>
  39. #include <signal.h>
  40. #include <memory>
  41. using json = nlohmann::ordered_json;
  42. bool server_verbose = false;
  43. bool server_log_json = true;
  44. enum stop_type {
  45. STOP_TYPE_FULL,
  46. STOP_TYPE_PARTIAL,
  47. };
  48. enum slot_state {
  49. SLOT_STATE_IDLE,
  50. SLOT_STATE_PROCESSING,
  51. };
  52. enum slot_command {
  53. SLOT_COMMAND_NONE,
  54. SLOT_COMMAND_LOAD_PROMPT,
  55. SLOT_COMMAND_RELEASE,
  56. };
  57. enum server_state {
  58. SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
  59. SERVER_STATE_READY, // Server is ready and model is loaded
  60. SERVER_STATE_ERROR // An error occurred, load_model failed
  61. };
  62. enum server_task_type {
  63. SERVER_TASK_TYPE_COMPLETION,
  64. SERVER_TASK_TYPE_CANCEL,
  65. SERVER_TASK_TYPE_NEXT_RESPONSE,
  66. SERVER_TASK_TYPE_METRICS,
  67. SERVER_TASK_TYPE_SLOT_SAVE,
  68. SERVER_TASK_TYPE_SLOT_RESTORE,
  69. SERVER_TASK_TYPE_SLOT_ERASE,
  70. };
  71. struct server_task {
  72. int id = -1; // to be filled by server_queue
  73. int id_multi = -1;
  74. int id_target = -1;
  75. server_task_type type;
  76. json data;
  77. bool infill = false;
  78. bool embedding = false;
  79. };
  80. struct server_task_result {
  81. int id = -1;
  82. int id_multi = -1;
  83. json data;
  84. bool stop;
  85. bool error;
  86. };
  87. struct server_task_multi {
  88. int id = -1;
  89. std::set<int> subtasks_remaining;
  90. std::vector<server_task_result> results;
  91. };
  92. struct slot_params {
  93. bool stream = true;
  94. bool cache_prompt = false; // remember the prompt to avoid reprocessing the entire prompt
  95. int32_t n_keep = 0; // number of tokens to keep from initial prompt
  96. int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
  97. int32_t n_predict = -1; // new tokens to predict
  98. std::vector<std::string> antiprompt;
  99. json input_prefix;
  100. json input_suffix;
  101. };
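// Illustrative example (not from the original source): a typical /completion request body
// whose fields end up in slot_params via launch_slot_with_task() further below:
//
//   {
//     "prompt":       "Hello",
//     "stream":       true,
//     "cache_prompt": true,
//     "n_predict":    64,
//     "n_keep":       0,
//     "stop":         ["\n\n"]
//   }
//
// "stop" entries become params.antiprompt; the remaining fields map onto the members above.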
  102. struct server_slot {
  103. int id;
  104. int id_task = -1;
  105. int id_multi = -1;
  106. struct slot_params params;
  107. slot_state state = SLOT_STATE_IDLE;
  108. slot_command command = SLOT_COMMAND_NONE;
  109. // used to determine the slot that has been idle the longest (least recently used)
  110. int64_t t_last_used = -1;
  111. // generation props
  112. int32_t n_ctx = 0; // context size per slot
  113. int32_t n_past = 0;
  114. int32_t n_decoded = 0;
  115. int32_t n_remaining = -1;
  116. int32_t i_batch = -1;
  117. int32_t n_predict = -1; // TODO: disambiguate from params.n_predict
  118. int32_t n_prompt_tokens = 0;
  119. int32_t n_prompt_tokens_processed = 0;
  120. std::string prompt;
  121. // when a task is submitted, we first tokenize the prompt and store it here
  122. std::vector<llama_token> prompt_tokens;
  123. std::string generated_text;
  124. std::vector<llama_token> cache_tokens;
  125. std::vector<completion_token_output> generated_token_probs;
  126. bool infill = false;
  127. bool embedding = false;
  128. bool has_next_token = true;
  129. bool truncated = false;
  130. bool stopped_eos = false;
  131. bool stopped_word = false;
  132. bool stopped_limit = false;
  133. bool oaicompat = false;
  134. std::string oaicompat_model;
  135. std::string stopping_word;
  136. // sampling
  137. llama_token sampled;
  138. struct llama_sampling_params sparams;
  139. llama_sampling_context * ctx_sampling = nullptr;
  140. json json_schema;
  141. int32_t ga_i = 0; // group-attention state
  142. int32_t ga_n = 1; // group-attention factor
  143. int32_t ga_w = 512; // group-attention width
  144. int32_t n_past_se = 0; // self-extend
  145. // stats
  146. size_t n_sent_text = 0; // number of sent text characters
  147. size_t n_sent_token_probs = 0;
  148. int64_t t_start_process_prompt;
  149. int64_t t_start_generation;
  150. double t_prompt_processing; // ms
  151. double t_token_generation; // ms
  152. void reset() {
  153. n_prompt_tokens = 0;
  154. generated_text = "";
  155. truncated = false;
  156. stopped_eos = false;
  157. stopped_word = false;
  158. stopped_limit = false;
  159. stopping_word = "";
  160. n_past = 0;
  161. n_sent_text = 0;
  162. n_sent_token_probs = 0;
  163. infill = false;
  164. ga_i = 0;
  165. n_past_se = 0;
  166. generated_token_probs.clear();
  167. }
  168. bool has_budget(gpt_params &global_params) {
  169. if (params.n_predict == -1 && global_params.n_predict == -1) {
  170. return true; // limitless
  171. }
  172. n_remaining = -1;
  173. if (params.n_predict != -1) {
  174. n_remaining = params.n_predict - n_decoded;
  175. } else if (global_params.n_predict != -1) {
  176. n_remaining = global_params.n_predict - n_decoded;
  177. }
  178. return n_remaining > 0; // true while there is still budget left
  179. }
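// Worked example (illustrative, not part of the original source): with params.n_predict == 64
// and n_decoded == 64, n_remaining becomes 0 and has_budget() returns false, ending generation
// for the slot; if both params.n_predict and the global n_predict are -1, the budget is unlimited.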
  180. bool available() const {
  181. return state == SLOT_STATE_IDLE && command == SLOT_COMMAND_NONE;
  182. }
  183. bool is_processing() const {
  184. return (state == SLOT_STATE_IDLE && command == SLOT_COMMAND_LOAD_PROMPT) || state == SLOT_STATE_PROCESSING;
  185. }
  186. void add_token_string(const completion_token_output & token) {
  187. if (command == SLOT_COMMAND_RELEASE) {
  188. return;
  189. }
  190. generated_token_probs.push_back(token);
  191. }
  192. void release() {
  193. if (state == SLOT_STATE_PROCESSING) {
  194. t_token_generation = (ggml_time_us() - t_start_generation) / 1e3;
  195. command = SLOT_COMMAND_RELEASE;
  196. }
  197. }
  198. json get_formated_timings() const {
  199. return json {
  200. {"prompt_n", n_prompt_tokens_processed},
  201. {"prompt_ms", t_prompt_processing},
  202. {"prompt_per_token_ms", t_prompt_processing / n_prompt_tokens_processed},
  203. {"prompt_per_second", 1e3 / t_prompt_processing * n_prompt_tokens_processed},
  204. {"predicted_n", n_decoded},
  205. {"predicted_ms", t_token_generation},
  206. {"predicted_per_token_ms", t_token_generation / n_decoded},
  207. {"predicted_per_second", 1e3 / t_token_generation * n_decoded},
  208. };
  209. }
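// Worked example (illustrative): if 50 prompt tokens were processed in 200 ms, then
// prompt_per_token_ms = 200 / 50 = 4 ms and prompt_per_second = 1e3 / 200 * 50 = 250 tokens/s;
// the predicted_* fields use the same arithmetic with t_token_generation and n_decoded.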
  210. size_t find_stopping_strings(const std::string & text, const size_t last_token_size, const stop_type type) {
  211. size_t stop_pos = std::string::npos;
  212. for (const std::string & word : params.antiprompt) {
  213. size_t pos;
  214. if (type == STOP_TYPE_FULL) {
  215. const size_t tmp = word.size() + last_token_size;
  216. const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
  217. pos = text.find(word, from_pos);
  218. } else {
  219. pos = find_partial_stop_string(word, text);
  220. }
  221. if (pos != std::string::npos && (stop_pos == std::string::npos || pos < stop_pos)) {
  222. if (type == STOP_TYPE_FULL) {
  223. stopped_word = true;
  224. stopping_word = word;
  225. has_next_token = false;
  226. }
  227. stop_pos = pos;
  228. }
  229. }
  230. return stop_pos;
  231. }
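// Example (illustrative): with params.antiprompt = {"###"} and generated text ending in
// "step 1 ###", a STOP_TYPE_FULL scan finds the whole word, sets stopped_word/stopping_word
// and clears has_next_token; text ending in "step 1 ##" only matches under STOP_TYPE_PARTIAL,
// so process_token() holds the trailing "##" back instead of streaming it to the client.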
  232. void print_timings() const {
  233. char buffer[512];
  234. double t_token = t_prompt_processing / n_prompt_tokens_processed;
  235. double n_tokens_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed;
  236. snprintf(buffer, 512, "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
  237. t_prompt_processing, n_prompt_tokens_processed,
  238. t_token, n_tokens_second);
  239. LOG_INFO(buffer, {
  240. {"id_slot", id},
  241. {"id_task", id_task},
  242. {"t_prompt_processing", t_prompt_processing},
  243. {"n_prompt_tokens_processed", n_prompt_tokens_processed},
  244. {"t_token", t_token},
  245. {"n_tokens_second", n_tokens_second},
  246. });
  247. t_token = t_token_generation / n_decoded;
  248. n_tokens_second = 1e3 / t_token_generation * n_decoded;
  249. snprintf(buffer, 512, "generation eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
  250. t_token_generation, n_decoded,
  251. t_token, n_tokens_second);
  252. LOG_INFO(buffer, {
  253. {"id_slot", id},
  254. {"id_task", id_task},
  255. {"t_token_generation", t_token_generation},
  256. {"n_decoded", n_decoded},
  257. {"t_token", t_token},
  258. {"n_tokens_second", n_tokens_second},
  259. });
  260. snprintf(buffer, 512, " total time = %10.2f ms", t_prompt_processing + t_token_generation);
  261. LOG_INFO(buffer, {
  262. {"id_slot", id},
  263. {"id_task", id_task},
  264. {"t_prompt_processing", t_prompt_processing},
  265. {"t_token_generation", t_token_generation},
  266. {"t_total", t_prompt_processing + t_token_generation},
  267. });
  268. }
  269. };
  270. struct server_metrics {
  271. int64_t t_start = 0;
  272. uint64_t n_prompt_tokens_processed_total = 0;
  273. uint64_t t_prompt_processing_total = 0;
  274. uint64_t n_tokens_predicted_total = 0;
  275. uint64_t t_tokens_generation_total = 0;
  276. uint64_t n_prompt_tokens_processed = 0;
  277. uint64_t t_prompt_processing = 0;
  278. uint64_t n_tokens_predicted = 0;
  279. uint64_t t_tokens_generation = 0;
  280. void init() {
  281. t_start = ggml_time_us();
  282. }
  283. void on_prompt_eval(const server_slot & slot) {
  284. n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed;
  285. n_prompt_tokens_processed += slot.n_prompt_tokens_processed;
  286. t_prompt_processing += slot.t_prompt_processing;
  287. t_prompt_processing_total += slot.t_prompt_processing;
  288. }
  289. void on_prediction(const server_slot & slot) {
  290. n_tokens_predicted_total += slot.n_decoded;
  291. n_tokens_predicted += slot.n_decoded;
  292. t_tokens_generation += slot.t_token_generation;
  293. t_tokens_generation_total += slot.t_token_generation;
  294. }
  295. void reset_bucket() {
  296. n_prompt_tokens_processed = 0;
  297. t_prompt_processing = 0;
  298. n_tokens_predicted = 0;
  299. t_tokens_generation = 0;
  300. }
  301. };
  302. struct server_queue {
  303. int id = 0;
  304. bool running;
  305. // queues
  306. std::vector<server_task> queue_tasks;
  307. std::vector<server_task> queue_tasks_deferred;
  308. std::vector<server_task_multi> queue_multitasks;
  309. std::mutex mutex_tasks;
  310. std::condition_variable condition_tasks;
  311. // callback functions
  312. std::function<void(server_task &)> callback_new_task;
  313. std::function<void(server_task_multi &)> callback_finish_multitask;
  314. std::function<void(void)> callback_update_slots;
  315. // Add a new task to the end of the queue
  316. int post(server_task task) {
  317. std::unique_lock<std::mutex> lock(mutex_tasks);
  318. if (task.id == -1) {
  319. task.id = id++;
  320. LOG_VERBOSE("new task id", {{"new_id", task.id}});
  321. }
  322. queue_tasks.push_back(std::move(task));
  323. condition_tasks.notify_one();
  324. return task.id;
  325. }
  326. // Add a new task, but defer until one slot is available
  327. void defer(server_task task) {
  328. std::unique_lock<std::mutex> lock(mutex_tasks);
  329. queue_tasks_deferred.push_back(std::move(task));
  330. }
  331. // Get the next id for creating a new task
  332. int get_new_id() {
  333. std::unique_lock<std::mutex> lock(mutex_tasks);
  334. int new_id = id++;
  335. LOG_VERBOSE("new task id", {{"new_id", new_id}});
  336. return new_id;
  337. }
  338. // Register function to process a new task
  339. void on_new_task(std::function<void(server_task &)> callback) {
  340. callback_new_task = std::move(callback);
  341. }
  342. // Register function to process a multitask when it is finished
  343. void on_finish_multitask(std::function<void(server_task_multi&)> callback) {
  344. callback_finish_multitask = std::move(callback);
  345. }
  346. // Register the function to be called when all slots' data is ready to be processed
  347. void on_update_slots(std::function<void(void)> callback) {
  348. callback_update_slots = std::move(callback);
  349. }
  350. // Call when the state of one slot is changed
  351. void notify_slot_changed() {
  352. // move deferred tasks back to main loop
  353. std::unique_lock<std::mutex> lock(mutex_tasks);
  354. for (auto & task : queue_tasks_deferred) {
  355. queue_tasks.push_back(std::move(task));
  356. }
  357. queue_tasks_deferred.clear();
  358. }
  359. // end the start_loop routine
  360. void terminate() {
  361. std::unique_lock<std::mutex> lock(mutex_tasks);
  362. running = false;
  363. condition_tasks.notify_all();
  364. }
  365. /**
  366. * Main loop consists of these steps:
  367. * - Wait until a new task arrives
  368. * - Process the task (i.e. maybe copy data into slot)
  369. * - Check if multitask is finished
  370. * - Update all slots
  371. */
  372. void start_loop() {
  373. running = true;
  374. while (true) {
  375. LOG_VERBOSE("new task may arrive", {});
  376. while (true) {
  377. std::unique_lock<std::mutex> lock(mutex_tasks);
  378. if (queue_tasks.empty()) {
  379. lock.unlock();
  380. break;
  381. }
  382. server_task task = queue_tasks.front();
  383. queue_tasks.erase(queue_tasks.begin());
  384. lock.unlock();
  385. LOG_VERBOSE("callback_new_task", {{"id_task", task.id}});
  386. callback_new_task(task);
  387. }
  388. LOG_VERBOSE("update_multitasks", {});
  389. // check if we have any finished multitasks
  390. auto queue_iterator = queue_multitasks.begin();
  391. while (queue_iterator != queue_multitasks.end()) {
  392. if (queue_iterator->subtasks_remaining.empty()) {
  393. // all subtasks done == multitask is done
  394. server_task_multi current_multitask = *queue_iterator;
  395. callback_finish_multitask(current_multitask);
  396. // remove this multitask
  397. queue_iterator = queue_multitasks.erase(queue_iterator);
  398. } else {
  399. ++queue_iterator;
  400. }
  401. }
  402. // all tasks in the current loop are processed, slot data is now ready
  403. LOG_VERBOSE("callback_update_slots", {});
  404. callback_update_slots();
  405. LOG_VERBOSE("wait for new task", {});
  406. {
  407. std::unique_lock<std::mutex> lock(mutex_tasks);
  408. if (queue_tasks.empty()) {
  409. if (!running) {
  410. LOG_VERBOSE("ending start_loop", {});
  411. return;
  412. }
  413. condition_tasks.wait(lock, [&]{
  414. return (!queue_tasks.empty() || !running);
  415. });
  416. }
  417. }
  418. }
  419. }
  420. //
  421. // functions to manage multitasks
  422. //
  423. // add a multitask by specifying the ids of all its subtasks (a subtask is a server_task)
  424. void add_multitask(int id_multi, std::vector<int> & sub_ids) {
  425. std::lock_guard<std::mutex> lock(mutex_tasks);
  426. server_task_multi multi;
  427. multi.id = id_multi;
  428. std::copy(sub_ids.begin(), sub_ids.end(), std::inserter(multi.subtasks_remaining, multi.subtasks_remaining.end()));
  429. queue_multitasks.push_back(multi);
  430. }
  431. // update the remaining subtasks, appending the result to the multitask
  432. void update_multitask(int id_multi, int id_sub, server_task_result & result) {
  433. std::lock_guard<std::mutex> lock(mutex_tasks);
  434. for (auto & multitask : queue_multitasks) {
  435. if (multitask.id == id_multi) {
  436. multitask.subtasks_remaining.erase(id_sub);
  437. multitask.results.push_back(result);
  438. }
  439. }
  440. }
  441. };
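// Usage sketch (illustrative, not part of the original source): the queue is driven by
// registering callbacks and then running the loop on a dedicated thread:
//
//   server_queue queue;
//   queue.on_new_task         ([&](server_task & task)        { /* assign task to a slot    */ });
//   queue.on_finish_multitask ([&](server_task_multi & multi) { /* aggregate subtask results */ });
//   queue.on_update_slots     ([&]()                          { /* batch and decode slots    */ });
//
//   server_task task;
//   task.type = SERVER_TASK_TYPE_COMPLETION;
//   queue.post(task);      // assigns an id and wakes the loop
//   queue.start_loop();    // blocks; queue.terminate() from another thread makes it return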
  442. struct server_response {
  443. typedef std::function<void(int, int, server_task_result &)> callback_multitask_t;
  444. callback_multitask_t callback_update_multitask;
  445. // for keeping track of all tasks waiting for the result
  446. std::set<int> waiting_task_ids;
  447. // the main result queue
  448. std::vector<server_task_result> queue_results;
  449. std::mutex mutex_results;
  450. std::condition_variable condition_results;
  451. // add the id_task to the list of tasks waiting for response
  452. void add_waiting_task_id(int id_task) {
  453. LOG_VERBOSE("waiting for task id", {{"id_task", id_task}});
  454. std::unique_lock<std::mutex> lock(mutex_results);
  455. waiting_task_ids.insert(id_task);
  456. }
  457. // when the request is finished, we can remove the task associated with it
  458. void remove_waiting_task_id(int id_task) {
  459. LOG_VERBOSE("remove waiting for task id", {{"id_task", id_task}});
  460. std::unique_lock<std::mutex> lock(mutex_results);
  461. waiting_task_ids.erase(id_task);
  462. }
  463. // This function blocks the thread until there is a response for this id_task
  464. server_task_result recv(int id_task) {
  465. while (true) {
  466. std::unique_lock<std::mutex> lock(mutex_results);
  467. condition_results.wait(lock, [&]{
  468. return !queue_results.empty();
  469. });
  470. for (int i = 0; i < (int) queue_results.size(); i++) {
  471. if (queue_results[i].id == id_task) {
  472. assert(queue_results[i].id_multi == -1);
  473. server_task_result res = queue_results[i];
  474. queue_results.erase(queue_results.begin() + i);
  475. return res;
  476. }
  477. }
  478. }
  479. // should never reach here
  480. }
  481. // Register the function to update multitask
  482. void on_multitask_update(callback_multitask_t callback) {
  483. callback_update_multitask = std::move(callback);
  484. }
  485. // Send a new result to a waiting id_task
  486. void send(server_task_result result) {
  487. LOG_VERBOSE("send new result", {{"id_task", result.id}});
  488. std::unique_lock<std::mutex> lock(mutex_results);
  489. for (const auto & id_task : waiting_task_ids) {
  490. // LOG_TEE("waiting task id %i \n", id_task);
  491. // for now, tasks that have associated parent multitasks just get erased once multitask picks up the result
  492. if (result.id_multi == id_task) {
  493. LOG_VERBOSE("callback_update_multitask", {{"id_task", id_task}});
  494. callback_update_multitask(id_task, result.id, result);
  495. continue;
  496. }
  497. if (result.id == id_task) {
  498. LOG_VERBOSE("queue_results.push_back", {{"id_task", id_task}});
  499. queue_results.push_back(result);
  500. condition_results.notify_all();
  501. return;
  502. }
  503. }
  504. }
  505. };
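// Usage sketch (illustrative): a request handler registers interest in a task id before
// posting it, then blocks on recv() until send() delivers the matching result
// (queue_tasks / queue_results here stand for the server_context members defined below):
//
//   const int id_task = queue_tasks.get_new_id();
//   queue_results.add_waiting_task_id(id_task);
//   // ... build a server_task with task.id = id_task and call queue_tasks.post(task) ...
//   server_task_result result = queue_results.recv(id_task);   // blocking
//   queue_results.remove_waiting_task_id(id_task);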
  506. struct server_context {
  507. llama_model * model = nullptr;
  508. llama_context * ctx = nullptr;
  509. gpt_params params;
  510. llama_batch batch;
  511. bool clean_kv_cache = true;
  512. bool add_bos_token = true;
  513. int32_t n_ctx; // total context for all clients / slots
  514. // system prompt
  515. bool system_need_update = false;
  516. std::string system_prompt;
  517. std::vector<llama_token> system_tokens;
  518. // slots / clients
  519. std::vector<server_slot> slots;
  520. json default_generation_settings_for_props;
  521. server_queue queue_tasks;
  522. server_response queue_results;
  523. server_metrics metrics;
  524. // Necessary similarity of prompt for slot selection
  525. float slot_prompt_similarity = 0.0f;
  526. ~server_context() {
  527. if (ctx) {
  528. llama_free(ctx);
  529. ctx = nullptr;
  530. }
  531. if (model) {
  532. llama_free_model(model);
  533. model = nullptr;
  534. }
  535. // Clear any sampling context
  536. for (server_slot & slot : slots) {
  537. if (slot.ctx_sampling != nullptr) {
  538. llama_sampling_free(slot.ctx_sampling);
  539. }
  540. }
  541. llama_batch_free(batch);
  542. }
  543. bool load_model(const gpt_params & params_) {
  544. params = params_;
  545. // dedicate one sequence to the system prompt
  546. params.n_parallel += 1;
  547. std::tie(model, ctx) = llama_init_from_gpt_params(params);
  548. params.n_parallel -= 1; // but be sneaky about it
  549. if (model == nullptr) {
  550. LOG_ERROR("unable to load model", {{"model", params.model}});
  551. return false;
  552. }
  553. n_ctx = llama_n_ctx(ctx);
  554. add_bos_token = llama_should_add_bos_token(model);
  555. GGML_ASSERT(llama_add_eos_token(model) != 1);
  556. return true;
  557. }
  558. bool validate_model_chat_template() const {
  559. llama_chat_message chat[] = {{"user", "test"}};
  560. const int res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0);
  561. return res > 0;
  562. }
  563. void init() {
  564. const int32_t n_ctx_slot = n_ctx / params.n_parallel;
  565. LOG_INFO("initializing slots", {{"n_slots", params.n_parallel}});
  566. for (int i = 0; i < params.n_parallel; i++) {
  567. server_slot slot;
  568. slot.id = i;
  569. slot.n_ctx = n_ctx_slot;
  570. slot.n_predict = params.n_predict;
  571. LOG_INFO("new slot", {
  572. {"id_slot", slot.id},
  573. {"n_ctx_slot", slot.n_ctx}
  574. });
  575. const int ga_n = params.grp_attn_n;
  576. const int ga_w = params.grp_attn_w;
  577. if (ga_n != 1) {
  578. GGML_ASSERT(ga_n > 0 && "ga_n must be positive"); // NOLINT
  579. GGML_ASSERT(ga_w % ga_n == 0 && "ga_w must be a multiple of ga_n"); // NOLINT
  580. //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of ga_w"); // NOLINT
  581. //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
  582. LOG_INFO("slot self-extend", {
  583. {"id_slot", slot.id},
  584. {"ga_n", ga_n},
  585. {"ga_w", ga_w}
  586. });
  587. }
  588. slot.ga_i = 0;
  589. slot.ga_n = ga_n;
  590. slot.ga_w = ga_w;
  591. slot.reset();
  592. slots.push_back(slot);
  593. }
  594. default_generation_settings_for_props = get_formated_generation(slots.front());
  595. default_generation_settings_for_props["seed"] = -1;
  596. // the update_slots() logic will always submit a maximum of n_batch tokens
  597. // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
  598. {
  599. const int32_t n_batch = llama_n_batch(ctx);
  600. // only a single seq_id per token is needed
  601. batch = llama_batch_init(n_batch, 0, 1);
  602. }
  603. metrics.init();
  604. }
  605. std::vector<llama_token> tokenize(const json & json_prompt, bool add_special) const {
  606. // TODO: currently, we tokenize using special tokens by default
  607. // this is not always correct (see https://github.com/ggerganov/llama.cpp/pull/4160#issuecomment-1824826216)
  608. // but it's better compared to completely ignoring ChatML and other chat templates
  609. const bool TMP_FORCE_SPECIAL = true;
  610. // If `add_special` is true, we only add BOS when json_prompt is a string,
  611. // or when the first element of the json_prompt array is a string.
  612. std::vector<llama_token> prompt_tokens;
  613. if (json_prompt.is_array()) {
  614. bool first = true;
  615. for (const auto & p : json_prompt) {
  616. if (p.is_string()) {
  617. auto s = p.template get<std::string>();
  618. std::vector<llama_token> p;
  619. if (first) {
  620. p = ::llama_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
  621. first = false;
  622. } else {
  623. p = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
  624. }
  625. prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
  626. } else {
  627. if (first) {
  628. first = false;
  629. }
  630. prompt_tokens.push_back(p.template get<llama_token>());
  631. }
  632. }
  633. } else {
  634. auto s = json_prompt.template get<std::string>();
  635. prompt_tokens = ::llama_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
  636. }
  637. return prompt_tokens;
  638. }
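// Example (illustrative): tokenize(json::parse(R"(["Hello", 123, " world"])"), true)
// tokenizes "Hello" with add_special applied (BOS only for the first string element),
// appends the raw token id 123 unchanged, then tokenizes " world" without BOS;
// a plain string prompt such as "Hello world" takes the final else branch instead.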
  639. server_slot * get_slot_by_id(int id) {
  640. for (server_slot & slot : slots) {
  641. if (slot.id == id) {
  642. return &slot;
  643. }
  644. }
  645. return nullptr;
  646. }
  647. server_slot * get_available_slot(const std::string & prompt) {
  648. server_slot * ret = nullptr;
  649. // find a slot whose cached prompt has at least slot_prompt_similarity similarity with the new prompt
  650. if (ret == nullptr && slot_prompt_similarity != 0.0f && !prompt.empty()) {
  651. int max_lcp_len = 0;
  652. float similarity = 0;
  653. for (server_slot & slot : slots) {
  654. // skip the slot if it is not available
  655. if (!slot.available()) {
  656. continue;
  657. }
  658. // current slot's prompt
  659. std::string slot_prompt = slot.prompt;
  660. // length of the current slot's prompt
  661. int slot_prompt_len = slot_prompt.size();
  662. // length of the Longest Common Prefix between the current slot's prompt and the input prompt
  663. int lcp_len = common_part(slot_prompt, prompt);
  664. // fraction of the common prefix length relative to the current slot's prompt length
  665. similarity = static_cast<float>(lcp_len) / slot_prompt_len;
  666. // select the current slot if the criteria match
  667. if (lcp_len > max_lcp_len && similarity > slot_prompt_similarity) {
  668. max_lcp_len = lcp_len;
  669. ret = &slot;
  670. }
  671. }
  672. if (ret != nullptr) {
  673. LOG_VERBOSE("selected slot by lcp similarity", {
  674. {"id_slot", ret->id},
  675. {"max_lcp_len", max_lcp_len},
  676. {"similarity", similarity},
  677. });
  678. }
  679. }
  680. // find the slot that has been least recently used
  681. if (ret == nullptr) {
  682. int64_t t_last = ggml_time_us();
  683. for (server_slot & slot : slots) {
  684. // skip the slot if it is not available
  685. if (!slot.available()) {
  686. continue;
  687. }
  688. // select the current slot if the criteria match
  689. if (slot.t_last_used < t_last) {
  690. t_last = slot.t_last_used;
  691. ret = &slot;
  692. }
  693. }
  694. if (ret != nullptr) {
  695. LOG_VERBOSE("selected slot by lru", {
  696. {"id_slot", ret->id},
  697. {"t_last", t_last},
  698. });
  699. }
  700. }
  701. return ret;
  702. }
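// Worked example (illustrative): with slot_prompt_similarity = 0.5 and an idle slot whose
// cached prompt is "The quick brown fox", a new prompt "The quick brown cat" shares a
// 16-character common prefix out of 19, similarity ~0.84 > 0.5, so that slot is reused;
// if no idle slot clears the threshold, the least recently used one (smallest t_last_used)
// is selected instead.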
  703. bool launch_slot_with_task(server_slot & slot, const server_task & task) {
  704. slot_params default_params;
  705. llama_sampling_params default_sparams;
  706. auto & data = task.data;
  707. if (data.count("__oaicompat") != 0) {
  708. slot.oaicompat = true;
  709. slot.oaicompat_model = json_value(data, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
  710. } else {
  711. slot.oaicompat = false;
  712. slot.oaicompat_model = "";
  713. }
  714. slot.params.stream = json_value(data, "stream", false);
  715. slot.params.cache_prompt = json_value(data, "cache_prompt", false);
  716. slot.params.n_predict = json_value(data, "n_predict", default_params.n_predict);
  717. slot.sparams.top_k = json_value(data, "top_k", default_sparams.top_k);
  718. slot.sparams.top_p = json_value(data, "top_p", default_sparams.top_p);
  719. slot.sparams.min_p = json_value(data, "min_p", default_sparams.min_p);
  720. slot.sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z);
  721. slot.sparams.typical_p = json_value(data, "typical_p", default_sparams.typical_p);
  722. slot.sparams.temp = json_value(data, "temperature", default_sparams.temp);
  723. slot.sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range);
  724. slot.sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent);
  725. slot.sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n);
  726. slot.sparams.penalty_repeat = json_value(data, "repeat_penalty", default_sparams.penalty_repeat);
  727. slot.sparams.penalty_freq = json_value(data, "frequency_penalty", default_sparams.penalty_freq);
  728. slot.sparams.penalty_present = json_value(data, "presence_penalty", default_sparams.penalty_present);
  729. slot.sparams.mirostat = json_value(data, "mirostat", default_sparams.mirostat);
  730. slot.sparams.mirostat_tau = json_value(data, "mirostat_tau", default_sparams.mirostat_tau);
  731. slot.sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
  732. slot.sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
  733. slot.params.n_keep = json_value(data, "n_keep", slot.params.n_keep);
  734. slot.params.n_discard = json_value(data, "n_discard", default_params.n_discard);
  735. slot.sparams.seed = json_value(data, "seed", default_sparams.seed);
  736. slot.sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
  737. slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
  738. // process "json_schema" and "grammar"
  739. if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
  740. send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST);
  741. return false;
  742. } else if (data.contains("json_schema") && !data.contains("grammar")) {
  743. try {
  744. auto schema = json_value(data, "json_schema", json::object());
  745. slot.sparams.grammar = json_schema_to_grammar(schema);
  746. } catch (const std::exception & e) {
  747. send_error(task, std::string("\"json_schema\": ") + e.what(), ERROR_TYPE_INVALID_REQUEST);
  748. return false;
  749. }
  750. } else {
  751. slot.sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
  752. }
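// Example (illustrative): a request carrying
//   "json_schema": { "type": "object", "properties": { "answer": { "type": "string" } } }
// has the schema converted to a GBNF grammar via json_schema_to_grammar(), while a request
// that specifies both a non-null "json_schema" and a non-null "grammar" is rejected with
// ERROR_TYPE_INVALID_REQUEST.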
  753. if (slot.params.cache_prompt && slot.ga_n != 1) {
  754. LOG_WARNING("cache_prompt is not supported with group-attention", {});
  755. slot.params.cache_prompt = false;
  756. }
  757. if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) {
  758. // Might be better to reject the request with a 400 ?
  759. LOG_WARNING("Max tokens to predict exceeds server configuration", {
  760. {"params.n_predict", slot.params.n_predict},
  761. {"slot.n_predict", slot.n_predict},
  762. });
  763. slot.params.n_predict = slot.n_predict;
  764. }
  765. // infill
  766. slot.params.input_prefix = json_value(data, "input_prefix", default_params.input_prefix);
  767. slot.params.input_suffix = json_value(data, "input_suffix", default_params.input_suffix);
  768. // get prompt
  769. if (!task.infill) {
  770. const auto & prompt = data.find("prompt");
  771. if (prompt == data.end()) {
  772. send_error(task, "\"prompt\" must be provided", ERROR_TYPE_INVALID_REQUEST);
  773. return false;
  774. }
  775. if (prompt->is_string()) {
  776. slot.prompt = prompt->get<std::string>();
  777. } else if (prompt->is_array() && prompt->size() == 1 && prompt->at(0).is_string()) {
  778. slot.prompt = prompt->at(0).get<std::string>();
  779. } else {
  780. send_error(task, "\"prompt\" must be a string or an array of strings", ERROR_TYPE_INVALID_REQUEST);
  781. return false;
  782. }
  783. }
  784. // penalize user-provided tokens
  785. {
  786. slot.sparams.penalty_prompt_tokens.clear();
  787. slot.sparams.use_penalty_prompt_tokens = false;
  788. const auto & penalty_prompt = data.find("penalty_prompt");
  789. if (penalty_prompt != data.end()) {
  790. if (penalty_prompt->is_string()) {
  791. const auto penalty_prompt_string = penalty_prompt->get<std::string>();
  792. slot.sparams.penalty_prompt_tokens = llama_tokenize(model, penalty_prompt_string, false);
  793. if (slot.params.n_predict > 0) {
  794. slot.sparams.penalty_prompt_tokens.reserve(slot.sparams.penalty_prompt_tokens.size() + slot.params.n_predict);
  795. }
  796. slot.sparams.use_penalty_prompt_tokens = true;
  797. LOG_VERBOSE("penalty_prompt_tokens", {
  798. {"id_slot", slot.id},
  799. {"tokens", slot.sparams.penalty_prompt_tokens},
  800. });
  801. }
  802. else if (penalty_prompt->is_array()) {
  803. const auto n_tokens = penalty_prompt->size();
  804. slot.sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot.params.n_predict));
  805. const int n_vocab = llama_n_vocab(model);
  806. for (const auto & penalty_token : *penalty_prompt) {
  807. if (penalty_token.is_number_integer()) {
  808. const auto tok = penalty_token.get<llama_token>();
  809. if (tok >= 0 && tok < n_vocab) {
  810. slot.sparams.penalty_prompt_tokens.push_back(tok);
  811. }
  812. }
  813. }
  814. slot.sparams.use_penalty_prompt_tokens = true;
  815. LOG_VERBOSE("penalty_prompt_tokens", {
  816. {"id_slot", slot.id},
  817. {"tokens", slot.sparams.penalty_prompt_tokens},
  818. });
  819. }
  820. }
  821. }
  822. {
  823. slot.sparams.logit_bias.clear();
  824. if (json_value(data, "ignore_eos", false)) {
  825. slot.sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
  826. }
  827. const auto & logit_bias = data.find("logit_bias");
  828. if (logit_bias != data.end() && logit_bias->is_array()) {
  829. const int n_vocab = llama_n_vocab(model);
  830. for (const auto & el : *logit_bias) {
  831. // TODO: we may want to throw errors here, in case "el" is incorrect
  832. if (el.is_array() && el.size() == 2) {
  833. float bias;
  834. if (el[1].is_number()) {
  835. bias = el[1].get<float>();
  836. } else if (el[1].is_boolean() && !el[1].get<bool>()) {
  837. bias = -INFINITY;
  838. } else {
  839. continue;
  840. }
  841. if (el[0].is_number_integer()) {
  842. llama_token tok = el[0].get<llama_token>();
  843. if (tok >= 0 && tok < n_vocab) {
  844. slot.sparams.logit_bias[tok] = bias;
  845. }
  846. } else if (el[0].is_string()) {
  847. auto toks = llama_tokenize(model, el[0].get<std::string>(), false);
  848. for (auto tok : toks) {
  849. slot.sparams.logit_bias[tok] = bias;
  850. }
  851. }
  852. }
  853. }
  854. }
  855. }
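// Example (illustrative): accepted "logit_bias" entries are [token-or-string, bias] pairs:
//   "logit_bias": [[15043, -1.0], ["Hello", 2.0], [15043, false]]
// an integer selects a token id directly, a string is tokenized and every resulting token
// receives the bias, and a boolean false maps to -INFINITY (never sample that token);
// "ignore_eos": true biases the EOS token to -INFINITY in the same way.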
  856. {
  857. slot.params.antiprompt.clear();
  858. const auto & stop = data.find("stop");
  859. if (stop != data.end() && stop->is_array()) {
  860. for (const auto & word : *stop) {
  861. if (!word.empty()) {
  862. slot.params.antiprompt.push_back(word);
  863. }
  864. }
  865. }
  866. }
  867. {
  868. const auto & samplers_sequence = data.find("samplers");
  869. if (samplers_sequence != data.end() && samplers_sequence->is_array()) {
  870. std::vector<std::string> sampler_names;
  871. for (const auto & sampler_name : *samplers_sequence) {
  872. if (sampler_name.is_string()) {
  873. sampler_names.emplace_back(sampler_name);
  874. }
  875. }
  876. slot.sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
  877. } else {
  878. slot.sparams.samplers_sequence = default_sparams.samplers_sequence;
  879. }
  880. }
  881. {
  882. if (slot.ctx_sampling != nullptr) {
  883. llama_sampling_free(slot.ctx_sampling);
  884. }
  885. slot.ctx_sampling = llama_sampling_init(slot.sparams);
  886. if (slot.ctx_sampling == nullptr) {
  887. // for now, the only error that may happen here is invalid grammar
  888. send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST);
  889. return false;
  890. }
  891. }
  892. slot.command = SLOT_COMMAND_LOAD_PROMPT;
  893. slot.prompt_tokens.clear();
  894. LOG_INFO("slot is processing task", {
  895. {"id_slot", slot.id},
  896. {"id_task", slot.id_task},
  897. });
  898. return true;
  899. }
  900. void kv_cache_clear() {
  901. LOG_VERBOSE("clearing KV cache", {});
  902. // clear the entire KV cache
  903. llama_kv_cache_clear(ctx);
  904. clean_kv_cache = false;
  905. }
  906. void system_prompt_update() {
  907. LOG_VERBOSE("system prompt update", {
  908. {"system_prompt", system_prompt},
  909. });
  910. kv_cache_clear();
  911. system_tokens.clear();
  912. if (!system_prompt.empty()) {
  913. system_tokens = ::llama_tokenize(ctx, system_prompt, true);
  914. llama_batch_clear(batch);
  915. for (int i = 0; i < (int)system_tokens.size(); ++i) {
  916. llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
  917. }
  918. const int32_t n_batch = llama_n_batch(ctx);
  919. for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
  920. const int32_t n_tokens = std::min(params.n_batch, batch.n_tokens - i);
  921. llama_batch batch_view = {
  922. n_tokens,
  923. batch.token + i,
  924. nullptr,
  925. batch.pos + i,
  926. batch.n_seq_id + i,
  927. batch.seq_id + i,
  928. batch.logits + i,
  929. 0, 0, 0, // unused
  930. };
  931. if (llama_decode(ctx, batch_view) != 0) {
  932. LOG_ERROR("llama_decode() failed", {});
  933. return;
  934. }
  935. }
  936. // assign the system KV cache to all parallel sequences
  937. for (int32_t i = 1; i <= params.n_parallel; ++i) {
  938. llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
  939. }
  940. }
  941. system_need_update = false;
  942. }
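// Illustrative note: the system prompt is decoded in chunks of at most n_batch tokens
// (e.g. 1300 system tokens with n_batch = 512 require three llama_decode() calls), and the
// resulting KV cache entries of sequence 0 are copied to sequences 1..n_parallel so every
// slot starts from the shared prefix.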
  943. bool system_prompt_set(const std::string & sys_prompt) {
  944. system_prompt = sys_prompt;
  945. LOG_VERBOSE("system prompt process", {
  946. {"system_prompt", system_prompt},
  947. });
  948. // release all slots
  949. for (server_slot & slot : slots) {
  950. slot.release();
  951. }
  952. system_need_update = true;
  953. return true;
  954. }
  955. bool process_token(completion_token_output & result, server_slot & slot) {
  956. // remember which tokens were sampled - used for repetition penalties during sampling
  957. const std::string token_str = llama_token_to_piece(ctx, result.tok, false);
  958. slot.sampled = result.tok;
  959. // search for stop words and remove them from the generated text
  960. slot.generated_text += token_str;
  961. slot.has_next_token = true;
  962. if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1) {
  963. // we can change penalty_prompt_tokens because it is always created from scratch for each request
  964. slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
  965. }
  966. // check if there is an incomplete UTF-8 character at the end
  967. bool incomplete = false;
  968. for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i) {
  969. unsigned char c = slot.generated_text[slot.generated_text.size() - i];
  970. if ((c & 0xC0) == 0x80) {
  971. // continuation byte: 10xxxxxx
  972. continue;
  973. }
  974. if ((c & 0xE0) == 0xC0) {
  975. // 2-byte character: 110xxxxx ...
  976. incomplete = i < 2;
  977. } else if ((c & 0xF0) == 0xE0) {
  978. // 3-byte character: 1110xxxx ...
  979. incomplete = i < 3;
  980. } else if ((c & 0xF8) == 0xF0) {
  981. // 4-byte character: 11110xxx ...
  982. incomplete = i < 4;
  983. }
  984. // else 1-byte character or invalid byte
  985. break;
  986. }
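// Worked example (illustrative): "€" is E2 82 AC in UTF-8. If the generated text currently
// ends in E2 82, the scan above skips the continuation byte 0x82 (i == 1), finds the lead
// byte 0xE2 (1110xxxx, a 3-byte sequence) at i == 2, and since 2 < 3 marks the character as
// incomplete, so the partial bytes are withheld from the streamed response for now.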
  987. if (!incomplete) {
  988. size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());
  989. const std::string str_test = slot.generated_text.substr(pos);
  990. bool is_stop_full = false;
  991. size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_FULL);
  992. if (stop_pos != std::string::npos) {
  993. is_stop_full = true;
  994. slot.generated_text.erase(
  995. slot.generated_text.begin() + pos + stop_pos,
  996. slot.generated_text.end());
  997. pos = std::min(slot.n_sent_text, slot.generated_text.size());
  998. } else {
  999. is_stop_full = false;
  1000. stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_PARTIAL);
  1001. }
  1002. // check if there is any token to predict
  1003. if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0)) {
  1004. // do not send the stop word in the response
  1005. result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
  1006. slot.n_sent_text += result.text_to_send.size();
  1007. // add the token to slot queue and cache
  1008. }
  1009. slot.add_token_string(result);
  1010. if (slot.params.stream) {
  1011. send_partial_response(slot, result);
  1012. }
  1013. }
  1014. if (incomplete) {
  1015. slot.has_next_token = true;
  1016. }
  1017. // check the limits
  1018. if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params)) {
  1019. slot.stopped_limit = true;
  1020. slot.has_next_token = false;
  1021. LOG_VERBOSE("stopped by limit", {
  1022. {"id_slot", slot.id},
  1023. {"id_task", slot.id_task},
  1024. {"n_decoded", slot.n_decoded},
  1025. {"n_predict", slot.params.n_predict},
  1026. });
  1027. }
  1028. if (llama_token_is_eog(model, result.tok)) {
  1029. slot.stopped_eos = true;
  1030. slot.has_next_token = false;
  1031. LOG_VERBOSE("eos token found", {});
  1032. }
  1033. auto n_ctx_train = llama_n_ctx_train(model);
  1034. if (slot.params.n_predict < 1 && slot.n_predict < 1 && slot.ga_n == 1
  1035. && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
  1036. LOG_WARNING("n_predict is not set and self-context extend is disabled."
  1037. " Limiting generated tokens to n_ctx_train to avoid EOS-less generation infinite loop", {
  1038. { "id_slot", slot.id },
  1039. { "params.n_predict", slot.params.n_predict },
  1040. { "slot.n_prompt_tokens", slot.n_prompt_tokens },
  1041. { "slot.n_decoded", slot.n_decoded },
  1042. { "slot.n_predict", slot.n_predict },
  1043. { "n_slots", params.n_parallel },
  1044. { "slot.n_ctx", slot.n_ctx },
  1045. { "n_ctx", n_ctx },
  1046. { "n_ctx_train", n_ctx_train },
  1047. { "ga_n", slot.ga_n },
  1048. });
  1049. slot.truncated = true;
  1050. slot.stopped_limit = true;
  1051. slot.has_next_token = false; // stop prediction
  1052. }
  1053. LOG_VERBOSE("next token", {
  1054. {"id_slot", slot.id},
  1055. {"id_task", slot.id_task},
  1056. {"token", result.tok},
  1057. {"token_text", tokens_to_output_formatted_string(ctx, result.tok)},
  1058. {"has_next_token", slot.has_next_token},
  1059. {"n_remain", slot.n_remaining},
  1060. {"n_decoded", slot.n_decoded},
  1061. {"stopped_eos", slot.stopped_eos},
  1062. {"stopped_word", slot.stopped_word},
  1063. {"stopped_limit", slot.stopped_limit},
  1064. {"stopping_word", slot.stopping_word},
  1065. });
  1066. return slot.has_next_token; // continue
  1067. }
  1068. json get_formated_generation(const server_slot & slot) const {
  1069. const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
  1070. const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() && eos_bias->second < 0.0f && std::isinf(eos_bias->second);
  1071. std::vector<std::string> samplers_sequence;
  1072. samplers_sequence.reserve(slot.sparams.samplers_sequence.size());
  1073. for (const auto & sampler_type : slot.sparams.samplers_sequence) {
  1074. samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
  1075. }
  1076. return json {
  1077. {"n_ctx", slot.n_ctx},
  1078. {"n_predict", slot.n_predict},
  1079. {"model", params.model_alias},
  1080. {"seed", slot.sparams.seed},
  1081. {"temperature", slot.sparams.temp},
  1082. {"dynatemp_range", slot.sparams.dynatemp_range},
  1083. {"dynatemp_exponent", slot.sparams.dynatemp_exponent},
  1084. {"top_k", slot.sparams.top_k},
  1085. {"top_p", slot.sparams.top_p},
  1086. {"min_p", slot.sparams.min_p},
  1087. {"tfs_z", slot.sparams.tfs_z},
  1088. {"typical_p", slot.sparams.typical_p},
  1089. {"repeat_last_n", slot.sparams.penalty_last_n},
  1090. {"repeat_penalty", slot.sparams.penalty_repeat},
  1091. {"presence_penalty", slot.sparams.penalty_present},
  1092. {"frequency_penalty", slot.sparams.penalty_freq},
  1093. {"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens},
  1094. {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
  1095. {"mirostat", slot.sparams.mirostat},
  1096. {"mirostat_tau", slot.sparams.mirostat_tau},
  1097. {"mirostat_eta", slot.sparams.mirostat_eta},
  1098. {"penalize_nl", slot.sparams.penalize_nl},
  1099. {"stop", slot.params.antiprompt},
  1100. {"n_predict", slot.params.n_predict}, // TODO: fix duplicate key n_predict
  1101. {"n_keep", slot.params.n_keep},
  1102. {"n_discard", slot.params.n_discard},
  1103. {"ignore_eos", ignore_eos},
  1104. {"stream", slot.params.stream},
  1105. {"logit_bias", slot.sparams.logit_bias},
  1106. {"n_probs", slot.sparams.n_probs},
  1107. {"min_keep", slot.sparams.min_keep},
  1108. {"grammar", slot.sparams.grammar},
  1109. {"samplers", samplers_sequence}
  1110. };
  1111. }
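// error reporting helpers: the task and slot overloads forward to the (id_task, id_multi) overload below,
// which logs the error and pushes an error result onto the result queue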
  1112. void send_error(const server_task & task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
  1113. send_error(task.id, task.id_multi, error, type);
  1114. }
  1115. void send_error(const server_slot & slot, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
  1116. send_error(slot.id_task, slot.id_multi, error, type);
  1117. }
  1118. void send_error(const int id_task, const int id_multi, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
  1119. LOG_ERROR("task error", {
  1120. {"id_multi", id_multi},
  1121. {"id_task", id_task},
  1122. {"error", error},
  1123. });
  1124. server_task_result res;
  1125. res.id = id_task;
  1126. res.id_multi = id_multi;
  1127. res.stop = false;
  1128. res.error = true;
  1129. res.data = format_error_response(error, type);
  1130. queue_results.send(res);
  1131. }
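// stream a single decoded token back to the client; when n_probs > 0, only the probabilities of the
// newly sent tokens are attached (the already-sent range is tracked via slot.n_sent_token_probs)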
  1132. void send_partial_response(server_slot & slot, completion_token_output tkn) {
  1133. server_task_result res;
  1134. res.id = slot.id_task;
  1135. res.id_multi = slot.id_multi;
  1136. res.error = false;
  1137. res.stop = false;
  1138. res.data = json {
  1139. {"content", tkn.text_to_send},
  1140. {"stop", false},
  1141. {"id_slot", slot.id},
  1142. {"multimodal", false}
  1143. };
  1144. if (slot.sparams.n_probs > 0) {
  1145. const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
  1146. const size_t probs_pos = std::min(slot.n_sent_token_probs, slot.generated_token_probs.size());
  1147. const size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());
  1148. std::vector<completion_token_output> probs_output;
  1149. if (probs_pos < probs_stop_pos) {
  1150. probs_output = std::vector<completion_token_output>(
  1151. slot.generated_token_probs.begin() + probs_pos,
  1152. slot.generated_token_probs.begin() + probs_stop_pos);
  1153. }
  1154. slot.n_sent_token_probs = probs_stop_pos;
  1155. res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
  1156. }
  1157. if (slot.oaicompat) {
  1158. res.data["oaicompat_token_ctr"] = slot.n_decoded;
  1159. res.data["model"] = slot.oaicompat_model;
  1160. }
  1161. queue_results.send(res);
  1162. }
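// send the final (stop == true) result for a slot, including the full generated text (non-streaming only),
// the generation settings, timings and - if requested - the accumulated token probabilities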
  1163. void send_final_response(const server_slot & slot) {
  1164. server_task_result res;
  1165. res.id = slot.id_task;
  1166. res.id_multi = slot.id_multi;
  1167. res.error = false;
  1168. res.stop = true;
  1169. res.data = json {
  1170. {"content", !slot.params.stream ? slot.generated_text : ""},
  1171. {"id_slot", slot.id},
  1172. {"stop", true},
  1173. {"model", params.model_alias},
  1174. {"tokens_predicted", slot.n_decoded},
  1175. {"tokens_evaluated", slot.n_prompt_tokens},
  1176. {"generation_settings", get_formated_generation(slot)},
  1177. {"prompt", slot.prompt},
  1178. {"truncated", slot.truncated},
  1179. {"stopped_eos", slot.stopped_eos},
  1180. {"stopped_word", slot.stopped_word},
  1181. {"stopped_limit", slot.stopped_limit},
  1182. {"stopping_word", slot.stopping_word},
  1183. {"tokens_cached", slot.n_past},
  1184. {"timings", slot.get_formated_timings()}
  1185. };
  1186. if (slot.sparams.n_probs > 0) {
  1187. std::vector<completion_token_output> probs;
  1188. if (!slot.params.stream && slot.stopped_word) {
  1189. const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
  1190. size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
  1191. probs = std::vector<completion_token_output>(
  1192. slot.generated_token_probs.begin(),
  1193. slot.generated_token_probs.end() - safe_offset);
  1194. } else {
  1195. probs = std::vector<completion_token_output>(
  1196. slot.generated_token_probs.begin(),
  1197. slot.generated_token_probs.end());
  1198. }
  1199. res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs);
  1200. }
  1201. if (slot.oaicompat) {
  1202. res.data["oaicompat_token_ctr"] = slot.n_decoded;
  1203. res.data["model"] = slot.oaicompat_model;
  1204. }
  1205. queue_results.send(res);
  1206. }
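// extract the embedding for this slot's sequence from the current batch; falls back to the per-token
// embedding and sends a zero vector of size n_embd if neither is available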
  1207. void send_embedding(const server_slot & slot, const llama_batch & batch) {
  1208. server_task_result res;
  1209. res.id = slot.id_task;
  1210. res.id_multi = slot.id_multi;
  1211. res.error = false;
  1212. res.stop = true;
  1213. const int n_embd = llama_n_embd(model);
  1214. std::vector<float> embd_res(n_embd, 0.0f);
  1215. for (int i = 0; i < batch.n_tokens; ++i) {
  1216. if (!batch.logits[i] || batch.seq_id[i][0] != slot.id + 1) {
  1217. continue;
  1218. }
  1219. const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
  1220. if (embd == NULL) {
  1221. embd = llama_get_embeddings_ith(ctx, i);
  1222. }
  1223. if (embd == NULL) {
  1224. LOG_ERROR("failed to get embeddings", {
  1225. {"token", batch.token [i]},
  1226. {"seq_id", batch.seq_id[i][0]}
  1227. });
  1228. res.data = json {
  1229. {"embedding", std::vector<float>(n_embd, 0.0f)},
  1230. };
  1231. continue;
  1232. }
  1233. llama_embd_normalize(embd, embd_res.data(), n_embd);
  1234. res.data = json {
  1235. {"embedding", embd_res},
  1236. };
  1237. }
  1238. queue_results.send(res);
  1239. }
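// create a completion task; for example (illustrative), {"prompt": ["a", "b"]} is split into two subtasks,
// while {"prompt": [1, 2, 3]} (token IDs) is kept as a single task - see the notes below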
  1240. void request_completion(int id_task, int id_multi, json data, bool infill, bool embedding) {
  1241. server_task task;
  1242. task.id = id_task;
  1243. task.id_multi = id_multi;
  1244. task.id_target = 0;
  1245. task.data = std::move(data);
  1246. task.infill = infill;
  1247. task.embedding = embedding;
  1248. task.type = SERVER_TASK_TYPE_COMPLETION;
1249. // when a completion task's prompt array is not a singleton, we split it into multiple requests
1250. // otherwise, it's a single-prompt task and we queue it directly
1251. // if there are numbers in the prompt array, it is treated as an array of tokens
  1252. if (task.data.count("prompt") != 0 && task.data.at("prompt").size() > 1) {
  1253. bool numbers = false;
  1254. for (const auto & e : task.data.at("prompt")) {
  1255. if (e.is_number()) {
  1256. numbers = true;
  1257. break;
  1258. }
  1259. }
1260. // NOTE: split_multiprompt_task() does not handle a mix of strings and numbers;
1261. // it will completely stall the server. The root cause of this bug is not yet known.
1262. //
1263. // if there are numbers, the prompt needs to be treated as a single prompt;
1264. // queue_tasks handles a mix of strings and numbers just fine.
  1265. if (numbers) {
  1266. queue_tasks.post(task);
  1267. } else {
  1268. split_multiprompt_task(id_task, task);
  1269. }
  1270. } else {
  1271. queue_tasks.post(task);
  1272. }
  1273. }
  1274. void request_cancel(int id_task) {
  1275. server_task task;
  1276. task.type = SERVER_TASK_TYPE_CANCEL;
  1277. task.id_target = id_task;
  1278. queue_tasks.post(task);
  1279. }
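// split a multi-prompt task into one subtask per prompt; the parent multitask (id_multi) keeps the list of
// subtask IDs so the results can be aggregated in on_finish_multitask()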
  1280. void split_multiprompt_task(int id_multi, const server_task & multiprompt_task) {
  1281. const int prompt_count = multiprompt_task.data.at("prompt").size();
  1282. if (prompt_count <= 1) {
  1283. send_error(multiprompt_task, "error while handling multiple prompts");
  1284. return;
  1285. }
1286. // generate an ID for each subtask
  1287. std::vector<int> subtask_ids(prompt_count);
  1288. for (int i = 0; i < prompt_count; i++) {
  1289. subtask_ids[i] = queue_tasks.get_new_id();
  1290. }
  1291. // queue up the multitask so we can track its subtask progression
  1292. queue_tasks.add_multitask(id_multi, subtask_ids);
  1293. // add subtasks
  1294. for (int i = 0; i < prompt_count; i++) {
  1295. json subtask_data = multiprompt_task.data;
  1296. subtask_data["prompt"] = subtask_data.at("prompt")[i];
  1297. // subtasks inherit everything else (infill mode, embedding mode, etc.)
  1298. request_completion(subtask_ids[i], id_multi, subtask_data, multiprompt_task.infill, multiprompt_task.embedding);
  1299. }
  1300. }
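// dispatch a single task from the queue: completions are assigned to a slot (or deferred when none is free),
// CANCEL releases the matching slot, METRICS reports per-slot state plus aggregate counters, and
// SLOT_SAVE / SLOT_RESTORE / SLOT_ERASE operate on a slot's KV cache state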
  1301. void process_single_task(const server_task & task) {
  1302. switch (task.type) {
  1303. case SERVER_TASK_TYPE_COMPLETION:
  1304. {
  1305. const int id_slot = json_value(task.data, "id_slot", -1);
  1306. server_slot * slot;
  1307. if (id_slot != -1) {
  1308. slot = get_slot_by_id(id_slot);
  1309. } else {
  1310. std::string prompt;
  1311. if (task.data.contains("prompt") && task.data.at("prompt").is_string()) {
1312. prompt = json_value(task.data, "prompt", std::string()); // store the extracted prompt (previously the return value was discarded)
  1313. }
  1314. slot = get_available_slot(prompt);
  1315. }
  1316. if (slot == nullptr) {
  1317. // if no slot is available, we defer this task for processing later
  1318. LOG_VERBOSE("no slot is available", {{"id_task", task.id}});
  1319. queue_tasks.defer(task);
  1320. break;
  1321. }
  1322. if (!slot->available()) {
  1323. // if requested slot is unavailable, we defer this task for processing later
  1324. LOG_VERBOSE("requested slot is unavailable", {{"id_task", task.id}});
  1325. queue_tasks.defer(task);
  1326. break;
  1327. }
  1328. if (task.data.contains("system_prompt")) {
  1329. std::string sys_prompt = json_value(task.data, "system_prompt", std::string());
  1330. system_prompt_set(sys_prompt);
  1331. for (server_slot & slot : slots) {
  1332. slot.n_past = 0;
  1333. slot.n_past_se = 0;
  1334. }
  1335. }
  1336. slot->reset();
  1337. slot->id_task = task.id;
  1338. slot->id_multi = task.id_multi;
  1339. slot->infill = task.infill;
  1340. slot->embedding = task.embedding;
  1341. if (!launch_slot_with_task(*slot, task)) {
  1342. LOG_ERROR("error while launching slot", task.data);
  1343. break;
  1344. }
  1345. } break;
  1346. case SERVER_TASK_TYPE_CANCEL:
  1347. {
  1348. // release slot linked with the task id
  1349. for (auto & slot : slots) {
  1350. if (slot.id_task == task.id_target) {
  1351. slot.release();
  1352. break;
  1353. }
  1354. }
  1355. } break;
  1356. case SERVER_TASK_TYPE_NEXT_RESPONSE:
  1357. {
  1358. // do nothing
  1359. } break;
  1360. case SERVER_TASK_TYPE_METRICS:
  1361. {
  1362. json slots_data = json::array();
  1363. int n_idle_slots = 0;
  1364. int n_processing_slots = 0;
  1365. for (server_slot & slot : slots) {
  1366. json slot_data = get_formated_generation(slot);
  1367. slot_data["id"] = slot.id;
  1368. slot_data["id_task"] = slot.id_task;
  1369. slot_data["state"] = slot.state;
  1370. slot_data["prompt"] = slot.prompt;
  1371. slot_data["next_token"] = {
  1372. {"has_next_token", slot.has_next_token},
  1373. {"n_remain", slot.n_remaining},
  1374. {"n_decoded", slot.n_decoded},
  1375. {"stopped_eos", slot.stopped_eos},
  1376. {"stopped_word", slot.stopped_word},
  1377. {"stopped_limit", slot.stopped_limit},
  1378. {"stopping_word", slot.stopping_word},
  1379. };
  1380. if (slot_data["state"] == SLOT_STATE_IDLE) {
  1381. n_idle_slots++;
  1382. } else {
  1383. n_processing_slots++;
  1384. }
  1385. slots_data.push_back(slot_data);
  1386. }
  1387. LOG_INFO("slot data", {
  1388. {"id_task", task.id},
  1389. {"n_idle_slots", n_idle_slots},
  1390. {"n_processing_slots", n_processing_slots}
  1391. });
  1392. LOG_VERBOSE("slot data", {
  1393. {"id_task", task.id},
  1394. {"n_idle_slots", n_idle_slots},
  1395. {"n_processing_slots", n_processing_slots},
  1396. {"slots", slots_data}
  1397. });
  1398. server_task_result res;
  1399. res.id = task.id;
  1400. res.id_multi = task.id_multi;
  1401. res.stop = true;
  1402. res.error = false;
  1403. res.data = {
  1404. { "idle", n_idle_slots },
  1405. { "processing", n_processing_slots },
  1406. { "deferred", queue_tasks.queue_tasks_deferred.size() },
  1407. { "t_start", metrics.t_start},
  1408. { "n_prompt_tokens_processed_total", metrics.n_prompt_tokens_processed_total},
  1409. { "t_tokens_generation_total", metrics.t_tokens_generation_total},
  1410. { "n_tokens_predicted_total", metrics.n_tokens_predicted_total},
  1411. { "t_prompt_processing_total", metrics.t_prompt_processing_total},
  1412. { "n_prompt_tokens_processed", metrics.n_prompt_tokens_processed},
  1413. { "t_prompt_processing", metrics.t_prompt_processing},
  1414. { "n_tokens_predicted", metrics.n_tokens_predicted},
  1415. { "t_tokens_generation", metrics.t_tokens_generation},
  1416. { "kv_cache_tokens_count", llama_get_kv_cache_token_count(ctx)},
  1417. { "kv_cache_used_cells", llama_get_kv_cache_used_cells(ctx)},
  1418. { "slots", slots_data },
  1419. };
  1420. if (json_value(task.data, "reset_bucket", false)) {
  1421. metrics.reset_bucket();
  1422. }
  1423. queue_results.send(res);
  1424. } break;
  1425. case SERVER_TASK_TYPE_SLOT_SAVE:
  1426. {
  1427. int id_slot = task.data.at("id_slot");
  1428. server_slot * slot = get_slot_by_id(id_slot);
  1429. if (slot == nullptr) {
  1430. send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
  1431. break;
  1432. }
  1433. if (!slot->available()) {
  1434. // if requested slot is unavailable, we defer this task for processing later
  1435. LOG_VERBOSE("requested slot is unavailable", {{"id_task", task.id}});
  1436. queue_tasks.defer(task);
  1437. break;
  1438. }
  1439. const size_t token_count = slot->cache_tokens.size();
  1440. const int64_t t_start = ggml_time_us();
  1441. std::string filename = task.data.at("filename");
  1442. std::string filepath = task.data.at("filepath");
  1443. const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), token_count);
  1444. const int64_t t_end = ggml_time_us();
  1445. const double t_save_ms = (t_end - t_start) / 1000.0;
  1446. server_task_result result;
  1447. result.id = task.id;
  1448. result.stop = true;
  1449. result.error = false;
  1450. result.data = json {
  1451. { "id_slot", id_slot },
  1452. { "filename", filename },
  1453. { "n_saved", token_count }, // tokens saved
  1454. { "n_written", nwrite }, // bytes written
  1455. { "timings", {
  1456. { "save_ms", t_save_ms }
  1457. } }
  1458. };
  1459. queue_results.send(result);
  1460. } break;
  1461. case SERVER_TASK_TYPE_SLOT_RESTORE:
  1462. {
  1463. int id_slot = task.data.at("id_slot");
  1464. server_slot * slot = get_slot_by_id(id_slot);
  1465. if (slot == nullptr) {
  1466. send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
  1467. break;
  1468. }
  1469. if (!slot->available()) {
  1470. // if requested slot is unavailable, we defer this task for processing later
  1471. LOG_VERBOSE("requested slot is unavailable", {{"id_task", task.id}});
  1472. queue_tasks.defer(task);
  1473. break;
  1474. }
  1475. const int64_t t_start = ggml_time_us();
  1476. std::string filename = task.data.at("filename");
  1477. std::string filepath = task.data.at("filepath");
  1478. slot->cache_tokens.resize(slot->n_ctx);
  1479. size_t token_count = 0;
  1480. size_t nread = llama_state_seq_load_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), slot->cache_tokens.size(), &token_count);
  1481. if (nread == 0) {
  1482. slot->cache_tokens.resize(0);
  1483. send_error(task, "Unable to restore slot, no available space in KV cache or invalid slot save file", ERROR_TYPE_INVALID_REQUEST);
  1484. break;
  1485. }
  1486. slot->cache_tokens.resize(token_count);
  1487. const int64_t t_end = ggml_time_us();
  1488. const double t_restore_ms = (t_end - t_start) / 1000.0;
  1489. server_task_result result;
  1490. result.id = task.id;
  1491. result.stop = true;
  1492. result.error = false;
  1493. result.data = json {
  1494. { "id_slot", id_slot },
  1495. { "filename", filename },
  1496. { "n_restored", token_count }, // tokens restored
  1497. { "n_read", nread }, // bytes read
  1498. { "timings", {
  1499. { "restore_ms", t_restore_ms }
  1500. } }
  1501. };
  1502. queue_results.send(result);
  1503. } break;
  1504. case SERVER_TASK_TYPE_SLOT_ERASE:
  1505. {
  1506. int id_slot = task.data.at("id_slot");
  1507. server_slot * slot = get_slot_by_id(id_slot);
  1508. if (slot == nullptr) {
  1509. send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
  1510. break;
  1511. }
  1512. if (!slot->available()) {
  1513. // if requested slot is unavailable, we defer this task for processing later
  1514. LOG_VERBOSE("requested slot is unavailable", {{"id_task", task.id}});
  1515. queue_tasks.defer(task);
  1516. break;
  1517. }
  1518. // Erase token cache
  1519. const size_t n_erased = slot->cache_tokens.size();
  1520. llama_kv_cache_seq_rm(ctx, slot->id + 1, -1, -1);
  1521. slot->cache_tokens.clear();
  1522. server_task_result result;
  1523. result.id = task.id;
  1524. result.stop = true;
  1525. result.error = false;
  1526. result.data = json {
  1527. { "id_slot", id_slot },
  1528. { "n_erased", n_erased }
  1529. };
  1530. queue_results.send(result);
  1531. } break;
  1532. }
  1533. }
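// called once all subtasks of a multitask are done; concatenates their JSON payloads into a single
// {"results": [...]} result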
  1534. void on_finish_multitask(const server_task_multi & multitask) {
  1535. // all subtasks done == multitask is done
  1536. server_task_result result;
  1537. result.id = multitask.id;
  1538. result.stop = true;
  1539. result.error = false;
  1540. // collect json results into one json result
  1541. std::vector<json> result_jsons;
  1542. for (const auto & subres : multitask.results) {
  1543. result_jsons.push_back(subres.data);
1544. result.error = result.error || subres.error; // report an error if any subtask failed ('&&' with an initial value of false could never become true)
  1545. }
  1546. result.data = json {
  1547. { "results", result_jsons }
  1548. };
  1549. queue_results.send(result);
  1550. }
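// main per-iteration update: release finished slots, apply context shift where needed, queue the previously
// sampled token of each active slot, tokenize and batch pending prompts up to n_batch, then decode and sample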
  1551. void update_slots() {
  1552. if (system_need_update) {
  1553. system_prompt_update();
  1554. }
  1555. // release slots
  1556. for (auto & slot : slots) {
  1557. if (slot.command == SLOT_COMMAND_RELEASE) {
  1558. slot.state = SLOT_STATE_IDLE;
  1559. slot.command = SLOT_COMMAND_NONE;
  1560. slot.t_last_used = ggml_time_us();
  1561. LOG_INFO("slot released", {
  1562. {"id_slot", slot.id},
  1563. {"id_task", slot.id_task},
  1564. {"n_ctx", n_ctx},
  1565. {"n_past", slot.n_past},
  1566. {"n_system_tokens", system_tokens.size()},
  1567. {"n_cache_tokens", slot.cache_tokens.size()},
  1568. {"truncated", slot.truncated}
  1569. });
  1570. queue_tasks.notify_slot_changed();
  1571. }
  1572. }
  1573. // check if all slots are idle
  1574. {
  1575. bool all_idle = true;
  1576. for (auto & slot : slots) {
  1577. if (slot.state != SLOT_STATE_IDLE || slot.command != SLOT_COMMAND_NONE) {
  1578. all_idle = false;
  1579. break;
  1580. }
  1581. }
  1582. if (all_idle) {
  1583. LOG_INFO("all slots are idle", {});
  1584. if (system_prompt.empty() && clean_kv_cache) {
  1585. kv_cache_clear();
  1586. }
  1587. return;
  1588. }
  1589. }
  1590. {
  1591. LOG_VERBOSE("posting NEXT_RESPONSE", {});
  1592. server_task task;
  1593. task.type = SERVER_TASK_TYPE_NEXT_RESPONSE;
  1594. task.id_target = -1;
  1595. queue_tasks.post(task);
  1596. }
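// context shift: once an active slot fills its context (n_ctx - 1), keep the first n_keep tokens,
// drop n_discard tokens after them, and shift the remaining KV cells back by n_discard
// (llama_kv_cache_seq_rm followed by llama_kv_cache_seq_add with a negative delta)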
  1597. // apply context-shift if needed
  1598. // TODO: simplify and improve
  1599. for (server_slot & slot : slots) {
  1600. if (slot.ga_n == 1) {
  1601. if (slot.is_processing() && (int) system_tokens.size() + slot.n_past >= slot.n_ctx - 1) {
  1602. // Shift context
  1603. const int n_keep = slot.params.n_keep + add_bos_token;
  1604. const int n_left = (int) system_tokens.size() + slot.n_past - n_keep;
  1605. const int n_discard = slot.params.n_discard ? slot.params.n_discard : (n_left / 2);
  1606. LOG_INFO("slot context shift", {
  1607. {"id_slot", slot.id},
  1608. {"id_task", slot.id_task},
  1609. {"n_keep", n_keep},
  1610. {"n_left", n_left},
  1611. {"n_discard", n_discard},
  1612. {"n_ctx", n_ctx},
  1613. {"n_past", slot.n_past},
  1614. {"n_system_tokens", system_tokens.size()},
  1615. {"n_cache_tokens", slot.cache_tokens.size()}
  1616. });
  1617. llama_kv_cache_seq_rm (ctx, slot.id + 1, n_keep , n_keep + n_discard);
  1618. llama_kv_cache_seq_add(ctx, slot.id + 1, n_keep + n_discard, system_tokens.size() + slot.n_past, -n_discard);
  1619. if (slot.params.cache_prompt) {
  1620. for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++) {
  1621. slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
  1622. }
  1623. slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);
  1624. }
  1625. slot.n_past -= n_discard;
  1626. slot.truncated = true;
  1627. }
  1628. }
  1629. }
  1630. // start populating the batch for this iteration
  1631. llama_batch_clear(batch);
1632. // first, add sampled tokens from any ongoing sequences
  1633. for (auto & slot : slots) {
  1634. if (slot.state == SLOT_STATE_IDLE) {
  1635. continue;
  1636. }
  1637. slot.i_batch = batch.n_tokens;
  1638. const int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1639. // TODO: we always have to take into account the "system_tokens"
  1640. // this is not great and needs to be improved somehow
  1641. llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id + 1 }, true);
  1642. slot.n_past += 1;
  1643. if (slot.params.cache_prompt) {
  1644. slot.cache_tokens.push_back(slot.sampled);
  1645. }
  1646. LOG_VERBOSE("slot decode token", {
  1647. {"id_slot", slot.id},
  1648. {"id_task", slot.id_task},
  1649. {"n_ctx", n_ctx},
  1650. {"n_past", slot.n_past},
  1651. {"n_system_tokens", system_tokens.size()},
  1652. {"n_cache_tokens", slot.cache_tokens.size()},
  1653. {"truncated", slot.truncated}
  1654. });
  1655. }
  1656. // process in chunks of params.n_batch
  1657. int32_t n_batch = llama_n_batch(ctx);
  1658. int32_t n_ubatch = llama_n_ubatch(ctx);
  1659. // next, batch any pending prompts without exceeding n_batch
  1660. if (params.cont_batching || batch.n_tokens == 0) {
  1661. for (auto & slot : slots) {
  1662. // this slot still has a prompt to be processed
  1663. if (slot.state == SLOT_STATE_IDLE && slot.command == SLOT_COMMAND_LOAD_PROMPT) {
  1664. auto & prompt_tokens = slot.prompt_tokens;
  1665. // we haven't tokenized the prompt yet - do it now:
  1666. if (prompt_tokens.empty()) {
  1667. LOG_VERBOSE("tokenizing prompt", {
  1668. {"id_slot", slot.id},
  1669. {"id_task", slot.id_task}
  1670. });
  1671. slot.t_start_process_prompt = ggml_time_us();
  1672. slot.t_start_generation = 0;
  1673. if (slot.infill) {
  1674. bool suff_rm_leading_spc = true;
  1675. if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
  1676. params.input_suffix.erase(0, 1);
  1677. suff_rm_leading_spc = false;
  1678. }
  1679. auto prefix_tokens = tokenize(slot.params.input_prefix, false);
  1680. auto suffix_tokens = tokenize(slot.params.input_suffix, false);
  1681. const int space_token = 29871; // TODO: this should not be hardcoded
  1682. if (suff_rm_leading_spc && !suffix_tokens.empty() && suffix_tokens[0] == space_token) {
  1683. suffix_tokens.erase(suffix_tokens.begin());
  1684. }
  1685. prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
  1686. prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
  1687. prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
  1688. prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
  1689. prefix_tokens.push_back(llama_token_middle(model));
  1690. prompt_tokens = prefix_tokens;
  1691. } else {
  1692. prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt
  1693. }
  1694. slot.n_past = 0;
  1695. slot.n_prompt_tokens = prompt_tokens.size();
  1696. LOG_VERBOSE("prompt tokenized", {
  1697. {"id_slot", slot.id},
  1698. {"id_task", slot.id_task},
  1699. {"n_ctx", slot.n_ctx},
  1700. {"n_keep", slot.params.n_keep},
  1701. {"n_prompt_tokens", slot.n_prompt_tokens},
  1702. {"prompt_tokens", tokens_to_str(ctx, prompt_tokens.cbegin(), prompt_tokens.cend())},
  1703. });
  1704. // empty prompt passed -> release the slot and send empty response
  1705. if (prompt_tokens.empty()) {
  1706. LOG_INFO("empty prompt - releasing slot", {
  1707. {"id_slot", slot.id},
  1708. {"id_task", slot.id_task}
  1709. });
  1710. slot.state = SLOT_STATE_PROCESSING;
  1711. slot.command = SLOT_COMMAND_NONE;
  1712. slot.release();
  1713. slot.print_timings();
  1714. send_final_response(slot);
  1715. continue;
  1716. }
  1717. if (slot.embedding) {
  1718. // this prompt is too large to process - discard it
  1719. if (slot.n_prompt_tokens > n_ubatch) {
  1720. slot.state = SLOT_STATE_PROCESSING;
  1721. slot.command = SLOT_COMMAND_NONE;
  1722. slot.release();
  1723. send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
  1724. continue;
  1725. }
  1726. } else {
  1727. if (slot.params.n_keep < 0) {
  1728. slot.params.n_keep = slot.n_prompt_tokens;
  1729. }
  1730. slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);
  1731. // if input prompt is too big, truncate it (if group attention self-extend is disabled)
  1732. if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx) {
  1733. const int n_left = slot.n_ctx - slot.params.n_keep;
  1734. const int n_block_size = n_left / 2;
  1735. const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
  1736. std::vector<llama_token> new_tokens(
  1737. prompt_tokens.begin(),
  1738. prompt_tokens.begin() + slot.params.n_keep);
  1739. new_tokens.insert(
  1740. new_tokens.end(),
  1741. prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
  1742. prompt_tokens.end());
  1743. prompt_tokens = std::move(new_tokens);
  1744. slot.truncated = true;
  1745. slot.n_prompt_tokens = prompt_tokens.size();
  1746. LOG_VERBOSE("input truncated", {
  1747. {"id_slot", slot.id},
  1748. {"id_task", slot.id_task},
  1749. {"n_ctx", slot.n_ctx},
  1750. {"n_keep", slot.params.n_keep},
  1751. {"n_left", n_left},
  1752. {"n_prompt_tokens", slot.n_prompt_tokens},
  1753. {"prompt_tokens", tokens_to_str(ctx, prompt_tokens.cbegin(), prompt_tokens.cend())},
  1754. });
  1755. GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
  1756. }
  1757. llama_sampling_reset(slot.ctx_sampling);
  1758. if (!slot.params.cache_prompt) {
  1759. slot.n_past_se = 0;
  1760. slot.ga_i = 0;
  1761. } else {
  1762. GGML_ASSERT(slot.ga_n == 1);
  1763. // reuse any previously computed tokens that are common with the new prompt
  1764. slot.n_past = common_part(slot.cache_tokens, prompt_tokens);
  1765. // push the prompt into the sampling context (do not apply grammar)
  1766. for (int i = 0; i < slot.n_past; ++i) {
  1767. llama_sampling_accept(slot.ctx_sampling, ctx, slot.cache_tokens[i], false);
  1768. }
  1769. }
  1770. }
  1771. if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0) {
  1772. // we have to evaluate at least 1 token to generate logits.
  1773. LOG_INFO("we have to evaluate at least 1 token to generate logits", {
  1774. { "id_slot", slot.id },
  1775. { "id_task", slot.id_task }
  1776. });
  1777. slot.n_past--;
  1778. if (slot.ga_i > 0) {
  1779. slot.n_past_se--;
  1780. }
  1781. }
  1782. slot.n_prompt_tokens_processed = 0;
  1783. }
  1784. if (slot.embedding) {
  1785. // cannot fit the prompt in the current batch - will try next iter
  1786. if (batch.n_tokens + slot.n_prompt_tokens > n_batch) {
  1787. continue;
  1788. }
  1789. }
  1790. // keep only the common part
  1791. int p0 = (int) system_tokens.size() + slot.n_past;
  1792. if (!llama_kv_cache_seq_rm(ctx, slot.id + 1, p0, -1)) {
  1793. // could not partially delete (likely using a non-Transformer model)
  1794. llama_kv_cache_seq_rm(ctx, slot.id + 1, -1, -1);
  1795. p0 = (int) system_tokens.size();
  1796. if (p0 != 0) {
  1797. // copy over the system prompt when there is one
  1798. llama_kv_cache_seq_cp(ctx, 0, slot.id + 1, -1, -1);
  1799. }
  1800. // there is no common part left (except for the system prompt)
  1801. slot.n_past = 0;
  1802. slot.n_past_se = 0;
  1803. slot.ga_i = 0;
  1804. // TODO: is the system prompt ever in the sampling context?
  1805. llama_sampling_reset(slot.ctx_sampling);
  1806. }
  1807. // remove the non-common part from the cache
  1808. slot.cache_tokens.resize(slot.n_past);
  1809. LOG_INFO("kv cache rm [p0, end)", {
  1810. { "id_slot", slot.id },
  1811. { "id_task", slot.id_task },
  1812. { "p0", p0 }
  1813. });
  1814. int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1815. int32_t ga_i = slot.ga_i;
  1816. int32_t ga_n = slot.ga_n;
  1817. int32_t ga_w = slot.ga_w;
  1818. // add prompt tokens for processing in the current batch
  1819. // TODO: the self-extend stuff here is a mess - simplify and/or abstract it somehow
  1820. for (; slot.n_past < slot.n_prompt_tokens && batch.n_tokens < n_batch; ++slot.n_past) {
  1821. if (slot.ga_n != 1) {
  1822. while (slot_npast >= ga_i + ga_w) {
  1823. const int bd = (ga_w/ga_n)*(ga_n - 1);
  1824. slot_npast -= bd;
  1825. ga_i += ga_w/ga_n;
  1826. }
  1827. }
  1828. llama_batch_add(batch, prompt_tokens[slot.n_past], system_tokens.size() + slot_npast, { slot.id + 1 }, false);
  1829. if (slot.params.cache_prompt) {
  1830. slot.cache_tokens.push_back(prompt_tokens[slot.n_past]);
  1831. }
  1832. slot.n_prompt_tokens_processed++;
  1833. slot_npast++;
  1834. }
  1835. LOG_VERBOSE("prompt processing progress", {
  1836. {"id_slot", slot.id},
  1837. {"n_past", slot.n_past},
  1838. {"n_ctx", n_ctx},
  1839. {"n_tokens", batch.n_tokens},
  1840. {"progress", (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens},
  1841. });
  1842. // entire prompt has been processed - start decoding new tokens
  1843. if (slot.n_past == slot.n_prompt_tokens) {
  1844. slot.state = SLOT_STATE_PROCESSING;
  1845. slot.command = SLOT_COMMAND_NONE;
  1846. GGML_ASSERT(batch.n_tokens > 0);
  1847. // extract the logits only for the last token
  1848. batch.logits[batch.n_tokens - 1] = true;
  1849. slot.n_decoded = 0;
  1850. slot.i_batch = batch.n_tokens - 1;
  1851. LOG_VERBOSE("prompt done", {
  1852. {"id_slot", slot.id},
  1853. {"n_past", slot.n_past},
  1854. {"n_ctx", n_ctx},
  1855. {"n_tokens", batch.n_tokens},
  1856. });
  1857. }
  1858. }
  1859. if (batch.n_tokens >= n_batch) {
  1860. break;
  1861. }
  1862. }
  1863. }
  1864. if (batch.n_tokens == 0) {
  1865. LOG_VERBOSE("no tokens to decode", {});
  1866. return;
  1867. }
  1868. LOG_VERBOSE("decoding batch", {
  1869. {"n_tokens", batch.n_tokens},
  1870. });
  1871. // process the created batch of tokens
  1872. for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
  1873. const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
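// Self-Extend (group attention): when ga_n != 1, the KV cache positions beyond ga_i are periodically
// shifted and divided by ga_n over windows of ga_w positions, compressing the effective positions so the
// context can extend beyond the training length (see the shift/div calls below)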
  1874. for (auto & slot : slots) {
  1875. if (slot.ga_n != 1) {
  1876. // context extension via Self-Extend
  1877. // TODO: simplify and/or abstract this
  1878. while (slot.n_past_se >= slot.ga_i + slot.ga_w) {
  1879. const int ib = (slot.ga_n * slot.ga_i) / slot.ga_w;
  1880. const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1);
  1881. const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w;
  1882. LOG_TEE("\n");
  1883. LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd);
  1884. LOG_TEE("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n);
  1885. LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd);
  1886. llama_kv_cache_seq_add(ctx, slot.id + 1, slot.ga_i, slot.n_past_se, ib * bd);
  1887. llama_kv_cache_seq_div(ctx, slot.id + 1, slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n);
  1888. llama_kv_cache_seq_add(ctx, slot.id + 1, slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd);
  1889. slot.n_past_se -= bd;
  1890. slot.ga_i += slot.ga_w / slot.ga_n;
  1891. LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i);
  1892. }
  1893. slot.n_past_se += n_tokens;
  1894. }
  1895. }
  1896. llama_batch batch_view = {
  1897. n_tokens,
  1898. batch.token + i,
  1899. nullptr,
  1900. batch.pos + i,
  1901. batch.n_seq_id + i,
  1902. batch.seq_id + i,
  1903. batch.logits + i,
  1904. 0, 0, 0, // unused
  1905. };
  1906. const int ret = llama_decode(ctx, batch_view);
  1907. if (ret != 0) {
  1908. if (n_batch == 1 || ret < 0) {
  1909. // if you get here, it means the KV cache is full - try increasing it via the context size
  1910. LOG_ERROR("failed to decode the batch: KV cache is full - try increasing it via the context size", {
  1911. {"i", i},
  1912. {"n_batch", ret},
  1913. {"ret", ret},
  1914. });
  1915. for (auto & slot : slots) {
  1916. slot.state = SLOT_STATE_PROCESSING;
  1917. slot.command = SLOT_COMMAND_NONE;
  1918. slot.release();
  1919. send_error(slot, "Input prompt is too big compared to KV size. Please try increasing KV size.");
  1920. }
  1921. break; // break loop of n_batch
  1922. }
  1923. // retry with half the batch size to try to find a free slot in the KV cache
  1924. n_batch /= 2;
  1925. i -= n_batch;
  1926. LOG_WARNING("failed to find free space in the KV cache, retrying with smaller batch size - try increasing it via the context size or enable defragmentation", {
  1927. {"i", i},
  1928. {"n_batch", n_batch},
  1929. {"ret", ret},
  1930. });
  1931. continue; // continue loop of n_batch
  1932. }
  1933. for (auto & slot : slots) {
  1934. if (slot.state != SLOT_STATE_PROCESSING || slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens)) {
  1935. continue; // continue loop of slots
  1936. }
  1937. // prompt evaluated for embedding
  1938. if (slot.embedding) {
  1939. send_embedding(slot, batch_view);
  1940. slot.release();
  1941. slot.i_batch = -1;
  1942. continue; // continue loop of slots
  1943. }
  1944. completion_token_output result;
  1945. const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i);
  1946. llama_sampling_accept(slot.ctx_sampling, ctx, id, true);
  1947. slot.n_decoded += 1;
  1948. if (slot.n_decoded == 1) {
  1949. slot.t_start_generation = ggml_time_us();
  1950. slot.t_prompt_processing = (slot.t_start_generation - slot.t_start_process_prompt) / 1e3;
  1951. metrics.on_prompt_eval(slot);
  1952. }
  1953. llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
  1954. result.tok = id;
  1955. const size_t n_probs = std::min(cur_p.size, (size_t) slot.sparams.n_probs);
  1956. if (n_probs > 0) {
  1957. const size_t n_valid = slot.ctx_sampling->n_valid;
  1958. // Make sure at least n_probs top tokens are at the front of the vector:
  1959. if (slot.sparams.temp == 0.0f && n_probs > n_valid) {
  1960. llama_sample_top_k(ctx, &cur_p, n_probs, 0);
  1961. }
  1962. if (slot.sparams.temp == 0.0f) {
1963. // With greedy sampling the probabilities may not have been calculated.
  1964. for (size_t i = 0; i < n_probs; ++i) {
  1965. result.probs.push_back({
  1966. cur_p.data[i].id,
  1967. i == 0 ? 1.0f : 0.0f
  1968. });
  1969. }
  1970. } else {
  1971. for (size_t i = 0; i < n_probs; ++i) {
  1972. result.probs.push_back({
  1973. cur_p.data[i].id,
  1974. i >= n_valid ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
  1975. });
  1976. }
  1977. }
  1978. }
  1979. if (!process_token(result, slot)) {
  1980. slot.release();
  1981. slot.print_timings();
  1982. send_final_response(slot);
  1983. metrics.on_prediction(slot);
  1984. }
  1985. slot.i_batch = -1;
  1986. }
  1987. }
  1988. LOG_VERBOSE("run slots completed", {});
  1989. }
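// static metadata of the loaded model, exposed through the "meta" field of the models endpoint response
// (see handle_models below)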
  1990. json model_meta() const {
  1991. return json {
  1992. {"vocab_type", llama_vocab_type (model)},
  1993. {"n_vocab", llama_n_vocab (model)},
  1994. {"n_ctx_train", llama_n_ctx_train (model)},
  1995. {"n_embd", llama_n_embd (model)},
  1996. {"n_params", llama_model_n_params(model)},
  1997. {"size", llama_model_size (model)},
  1998. };
  1999. }
  2000. };
  2001. static void log_server_request(const httplib::Request & req, const httplib::Response & res) {
  2002. // skip GH copilot requests when using default port
  2003. if (req.path == "/v1/health" || req.path == "/v1/completions") {
  2004. return;
  2005. }
  2006. LOG_INFO("request", {
  2007. {"remote_addr", req.remote_addr},
  2008. {"remote_port", req.remote_port},
  2009. {"status", res.status},
  2010. {"method", req.method},
  2011. {"path", req.path},
  2012. {"params", req.params},
  2013. });
  2014. LOG_VERBOSE("request", {
  2015. {"request", req.body},
  2016. {"response", res.body},
  2017. });
  2018. }
  2019. std::function<void(int)> shutdown_handler;
  2020. std::atomic_flag is_terminating = ATOMIC_FLAG_INIT;
  2021. inline void signal_handler(int signal) {
  2022. if (is_terminating.test_and_set()) {
2023. // in case it hangs, we can force-terminate the server by hitting Ctrl+C twice
2024. // this is for a better developer experience; we can remove it once the server is stable enough
  2025. fprintf(stderr, "Received second interrupt, terminating immediately.\n");
  2026. exit(1);
  2027. }
  2028. shutdown_handler(signal);
  2029. }
  2030. int main(int argc, char ** argv) {
  2031. #if SERVER_VERBOSE != 1
  2032. log_disable();
  2033. #endif
  2034. // own arguments required by this example
  2035. gpt_params params;
  2036. if (!gpt_params_parse(argc, argv, params)) {
  2037. gpt_params_print_usage(argc, argv, params);
  2038. return 1;
  2039. }
  2040. // TODO: not great to use extern vars
  2041. server_log_json = params.log_json;
  2042. server_verbose = params.verbosity > 0;
  2043. // struct that contains llama context and inference
  2044. server_context ctx_server;
  2045. if (!params.system_prompt.empty()) {
  2046. ctx_server.system_prompt_set(params.system_prompt);
  2047. }
  2048. if (params.model_alias == "unknown") {
  2049. params.model_alias = params.model;
  2050. }
  2051. llama_backend_init();
  2052. llama_numa_init(params.numa);
  2053. LOG_INFO("build info", {
  2054. {"build", LLAMA_BUILD_NUMBER},
  2055. {"commit", LLAMA_COMMIT}
  2056. });
  2057. LOG_INFO("system info", {
  2058. {"n_threads", params.n_threads},
  2059. {"n_threads_batch", params.n_threads_batch},
  2060. {"total_threads", std::thread::hardware_concurrency()},
  2061. {"system_info", llama_print_system_info()},
  2062. });
  2063. std::unique_ptr<httplib::Server> svr;
  2064. #ifdef CPPHTTPLIB_OPENSSL_SUPPORT
  2065. if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
  2066. LOG_INFO("Running with SSL", {{"key", params.ssl_file_key}, {"cert", params.ssl_file_cert}});
  2067. svr.reset(
  2068. new httplib::SSLServer(params.ssl_file_cert.c_str(), params.ssl_file_key.c_str())
  2069. );
  2070. } else {
  2071. LOG_INFO("Running without SSL", {});
  2072. svr.reset(new httplib::Server());
  2073. }
  2074. #else
  2075. svr.reset(new httplib::Server());
  2076. #endif
  2077. std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};
  2078. svr->set_default_headers({{"Server", "llama.cpp"}});
  2079. // CORS preflight
  2080. svr->Options(R"(.*)", [](const httplib::Request & req, httplib::Response & res) {
  2081. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2082. res.set_header("Access-Control-Allow-Credentials", "true");
  2083. res.set_header("Access-Control-Allow-Methods", "POST");
  2084. res.set_header("Access-Control-Allow-Headers", "*");
  2085. return res.set_content("", "application/json; charset=utf-8");
  2086. });
  2087. svr->set_logger(log_server_request);
  2088. auto res_error = [](httplib::Response & res, json error_data) {
  2089. json final_response {{"error", error_data}};
  2090. res.set_content(final_response.dump(), "application/json; charset=utf-8");
  2091. res.status = json_value(error_data, "code", 500);
  2092. };
  2093. svr->set_exception_handler([&res_error](const httplib::Request &, httplib::Response & res, std::exception_ptr ep) {
  2094. std::string message;
  2095. try {
  2096. std::rethrow_exception(std::move(ep));
  2097. } catch (std::exception & e) {
  2098. message = e.what();
  2099. } catch (...) {
  2100. message = "Unknown Exception";
  2101. }
  2102. json formatted_error = format_error_response(message, ERROR_TYPE_SERVER);
  2103. LOG_VERBOSE("Got exception", formatted_error);
  2104. res_error(res, formatted_error);
  2105. });
  2106. svr->set_error_handler([&res_error](const httplib::Request &, httplib::Response & res) {
  2107. if (res.status == 404) {
  2108. res_error(res, format_error_response("File Not Found", ERROR_TYPE_NOT_FOUND));
  2109. }
  2110. // for other error codes, we skip processing here because it's already done by res_error()
  2111. });
  2112. // set timeouts and change hostname and port
  2113. svr->set_read_timeout (params.timeout_read);
  2114. svr->set_write_timeout(params.timeout_write);
  2115. if (!svr->bind_to_port(params.hostname, params.port)) {
  2116. fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", params.hostname.c_str(), params.port);
  2117. return 1;
  2118. }
  2119. std::unordered_map<std::string, std::string> log_data;
  2120. log_data["hostname"] = params.hostname;
  2121. log_data["port"] = std::to_string(params.port);
  2122. if (params.api_keys.size() == 1) {
  2123. auto key = params.api_keys[0];
  2124. log_data["api_key"] = "api_key: ****" + key.substr(std::max((int)(key.length() - 4), 0));
  2125. } else if (params.api_keys.size() > 1) {
  2126. log_data["api_key"] = "api_key: " + std::to_string(params.api_keys.size()) + " keys loaded";
  2127. }
2128. // required prompt similarity for slot selection
  2129. ctx_server.slot_prompt_similarity = params.slot_prompt_similarity;
  2130. // load the model
  2131. if (!ctx_server.load_model(params)) {
  2132. state.store(SERVER_STATE_ERROR);
  2133. return 1;
  2134. } else {
  2135. ctx_server.init();
  2136. state.store(SERVER_STATE_READY);
  2137. }
  2138. LOG_INFO("model loaded", {});
  2139. const auto model_meta = ctx_server.model_meta();
  2140. // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
  2141. if (params.chat_template.empty()) {
  2142. if (!ctx_server.validate_model_chat_template()) {
  2143. LOG_ERROR("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
  2144. params.chat_template = "chatml";
  2145. }
  2146. }
  2147. // print sample chat example to make it clear which template is used
  2148. {
  2149. json chat;
  2150. chat.push_back({{"role", "system"}, {"content", "You are a helpful assistant"}});
  2151. chat.push_back({{"role", "user"}, {"content", "Hello"}});
  2152. chat.push_back({{"role", "assistant"}, {"content", "Hi there"}});
  2153. chat.push_back({{"role", "user"}, {"content", "How are you?"}});
  2154. const std::string chat_example = format_chat(ctx_server.model, params.chat_template, chat);
  2155. LOG_INFO("chat template", {
  2156. {"chat_example", chat_example},
  2157. {"built_in", params.chat_template.empty()},
  2158. });
  2159. }
  2160. //
  2161. // Middlewares
  2162. //
  2163. auto middleware_validate_api_key = [&params, &res_error](const httplib::Request & req, httplib::Response & res) {
  2164. // TODO: should we apply API key to all endpoints, including "/health" and "/models"?
  2165. static const std::set<std::string> protected_endpoints = {
  2166. "/props",
  2167. "/completion",
  2168. "/completions",
  2169. "/v1/completions",
  2170. "/chat/completions",
  2171. "/v1/chat/completions",
  2172. "/infill",
  2173. "/tokenize",
  2174. "/detokenize",
  2175. "/embedding",
  2176. "/embeddings",
  2177. "/v1/embeddings",
  2178. };
  2179. // If API key is not set, skip validation
  2180. if (params.api_keys.empty()) {
  2181. return true;
  2182. }
  2183. // If path is not in protected_endpoints list, skip validation
  2184. if (protected_endpoints.find(req.path) == protected_endpoints.end()) {
  2185. return true;
  2186. }
  2187. // Check for API key in the header
  2188. auto auth_header = req.get_header_value("Authorization");
  2189. std::string prefix = "Bearer ";
  2190. if (auth_header.substr(0, prefix.size()) == prefix) {
  2191. std::string received_api_key = auth_header.substr(prefix.size());
  2192. if (std::find(params.api_keys.begin(), params.api_keys.end(), received_api_key) != params.api_keys.end()) {
  2193. return true; // API key is valid
  2194. }
  2195. }
  2196. // API key is invalid or not provided
  2197. // TODO: make another middleware for CORS related logic
  2198. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2199. res_error(res, format_error_response("Invalid API Key", ERROR_TYPE_AUTHENTICATION));
  2200. LOG_WARNING("Unauthorized: Invalid API Key", {});
  2201. return false;
  2202. };
  2203. // register server middlewares
  2204. svr->set_pre_routing_handler([&middleware_validate_api_key](const httplib::Request & req, httplib::Response & res) {
  2205. if (!middleware_validate_api_key(req, res)) {
  2206. return httplib::Server::HandlerResponse::Handled;
  2207. }
  2208. return httplib::Server::HandlerResponse::Unhandled;
  2209. });
  2210. //
  2211. // Route handlers (or controllers)
  2212. //
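// /health: when the server is ready, responds with the current slot availability, e.g. (illustrative):
//   {"status": "ok", "slots_idle": 1, "slots_processing": 0}
// returns 503 if no slot is available and the "fail_on_no_slot" parameter is set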
  2213. const auto handle_health = [&](const httplib::Request & req, httplib::Response & res) {
  2214. server_state current_state = state.load();
  2215. switch (current_state) {
  2216. case SERVER_STATE_READY:
  2217. {
  2218. // request slots data using task queue
  2219. server_task task;
  2220. task.id = ctx_server.queue_tasks.get_new_id();
  2221. task.type = SERVER_TASK_TYPE_METRICS;
  2222. task.id_target = -1;
  2223. ctx_server.queue_results.add_waiting_task_id(task.id);
  2224. ctx_server.queue_tasks.post(task);
  2225. // get the result
  2226. server_task_result result = ctx_server.queue_results.recv(task.id);
  2227. ctx_server.queue_results.remove_waiting_task_id(task.id);
  2228. const int n_idle_slots = result.data.at("idle");
  2229. const int n_processing_slots = result.data.at("processing");
  2230. json health = {
  2231. {"status", "ok"},
  2232. {"slots_idle", n_idle_slots},
  2233. {"slots_processing", n_processing_slots}
  2234. };
  2235. res.status = 200; // HTTP OK
  2236. if (params.endpoint_slots && req.has_param("include_slots")) {
  2237. health["slots"] = result.data.at("slots");
  2238. }
  2239. if (n_idle_slots == 0) {
  2240. health["status"] = "no slot available";
  2241. if (req.has_param("fail_on_no_slot")) {
  2242. res.status = 503; // HTTP Service Unavailable
  2243. }
  2244. }
  2245. res.set_content(health.dump(), "application/json");
  2246. break;
  2247. }
  2248. case SERVER_STATE_LOADING_MODEL:
  2249. {
  2250. res_error(res, format_error_response("Loading model", ERROR_TYPE_UNAVAILABLE));
  2251. } break;
  2252. case SERVER_STATE_ERROR:
  2253. {
  2254. res_error(res, format_error_response("Model failed to load", ERROR_TYPE_SERVER));
  2255. } break;
  2256. }
  2257. };
  2258. const auto handle_slots = [&](const httplib::Request &, httplib::Response & res) {
  2259. if (!params.endpoint_slots) {
  2260. res_error(res, format_error_response("This server does not support slots endpoint.", ERROR_TYPE_NOT_SUPPORTED));
  2261. return;
  2262. }
  2263. // request slots data using task queue
  2264. server_task task;
  2265. task.id = ctx_server.queue_tasks.get_new_id();
  2266. task.id_multi = -1;
  2267. task.id_target = -1;
  2268. task.type = SERVER_TASK_TYPE_METRICS;
  2269. ctx_server.queue_results.add_waiting_task_id(task.id);
  2270. ctx_server.queue_tasks.post(task);
  2271. // get the result
  2272. server_task_result result = ctx_server.queue_results.recv(task.id);
  2273. ctx_server.queue_results.remove_waiting_task_id(task.id);
  2274. res.set_content(result.data.at("slots").dump(), "application/json");
  2275. res.status = 200; // HTTP OK
  2276. };
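// /metrics: renders the counters and gauges defined below in the Prometheus text exposition format, e.g. (illustrative):
//   # HELP llamacpp:prompt_tokens_total Number of prompt tokens processed.
//   # TYPE llamacpp:prompt_tokens_total counter
//   llamacpp:prompt_tokens_total 128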
  2277. const auto handle_metrics = [&](const httplib::Request &, httplib::Response & res) {
  2278. if (!params.endpoint_metrics) {
  2279. res_error(res, format_error_response("This server does not support metrics endpoint.", ERROR_TYPE_NOT_SUPPORTED));
  2280. return;
  2281. }
  2282. // request slots data using task queue
  2283. server_task task;
  2284. task.id = ctx_server.queue_tasks.get_new_id();
  2285. task.id_multi = -1;
  2286. task.id_target = -1;
  2287. task.type = SERVER_TASK_TYPE_METRICS;
  2288. task.data.push_back({{"reset_bucket", true}});
  2289. ctx_server.queue_results.add_waiting_task_id(task.id);
  2290. ctx_server.queue_tasks.post(task);
  2291. // get the result
  2292. server_task_result result = ctx_server.queue_results.recv(task.id);
  2293. ctx_server.queue_results.remove_waiting_task_id(task.id);
  2294. json data = result.data;
  2295. const uint64_t n_prompt_tokens_processed = data.at("n_prompt_tokens_processed");
  2296. const uint64_t t_prompt_processing = data.at("t_prompt_processing");
  2297. const uint64_t n_tokens_predicted = data.at("n_tokens_predicted");
  2298. const uint64_t t_tokens_generation = data.at("t_tokens_generation");
  2299. const int32_t kv_cache_used_cells = data.at("kv_cache_used_cells");
  2300. // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
  2301. json all_metrics_def = json {
  2302. {"counter", {{
  2303. {"name", "prompt_tokens_total"},
  2304. {"help", "Number of prompt tokens processed."},
  2305. {"value", (uint64_t) data.at("n_prompt_tokens_processed_total")}
  2306. }, {
  2307. {"name", "prompt_seconds_total"},
  2308. {"help", "Prompt process time"},
  2309. {"value", (uint64_t) data.at("t_prompt_processing_total") / 1.e3}
  2310. }, {
  2311. {"name", "tokens_predicted_total"},
  2312. {"help", "Number of generation tokens processed."},
  2313. {"value", (uint64_t) data.at("n_tokens_predicted_total")}
  2314. }, {
  2315. {"name", "tokens_predicted_seconds_total"},
  2316. {"help", "Predict process time"},
  2317. {"value", (uint64_t) data.at("t_tokens_generation_total") / 1.e3}
  2318. }}},
  2319. {"gauge", {{
  2320. {"name", "prompt_tokens_seconds"},
  2321. {"help", "Average prompt throughput in tokens/s."},
  2322. {"value", n_prompt_tokens_processed ? 1.e3 / t_prompt_processing * n_prompt_tokens_processed : 0.}
  2323. },{
  2324. {"name", "predicted_tokens_seconds"},
  2325. {"help", "Average generation throughput in tokens/s."},
  2326. {"value", n_tokens_predicted ? 1.e3 / t_tokens_generation * n_tokens_predicted : 0.}
  2327. },{
  2328. {"name", "kv_cache_usage_ratio"},
  2329. {"help", "KV-cache usage. 1 means 100 percent usage."},
  2330. {"value", 1. * kv_cache_used_cells / params.n_ctx}
  2331. },{
  2332. {"name", "kv_cache_tokens"},
  2333. {"help", "KV-cache tokens."},
  2334. {"value", (uint64_t) data.at("kv_cache_tokens_count")}
  2335. },{
  2336. {"name", "requests_processing"},
  2337. {"help", "Number of request processing."},
  2338. {"value", (uint64_t) data.at("processing")}
  2339. },{
  2340. {"name", "requests_deferred"},
  2341. {"help", "Number of request deferred."},
  2342. {"value", (uint64_t) data.at("deferred")}
  2343. }}}
  2344. };
  2345. std::stringstream prometheus;
  2346. for (const auto & el : all_metrics_def.items()) {
  2347. const auto & type = el.key();
  2348. const auto & metrics_def = el.value();
  2349. for (const auto & metric_def : metrics_def) {
  2350. const std::string name = metric_def.at("name");
  2351. const std::string help = metric_def.at("help");
  2352. auto value = json_value(metric_def, "value", 0.);
  2353. prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
  2354. << "# TYPE llamacpp:" << name << " " << type << "\n"
  2355. << "llamacpp:" << name << " " << value << "\n";
  2356. }
  2357. }
  2358. const int64_t t_start = data.at("t_start");
  2359. res.set_header("Process-Start-Time-Unix", std::to_string(t_start));
  2360. res.set_content(prometheus.str(), "text/plain; version=0.0.4");
  2361. res.status = 200; // HTTP OK
  2362. };
  2363. const auto handle_slots_save = [&ctx_server, &res_error, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
  2364. json request_data = json::parse(req.body);
  2365. std::string filename = request_data.at("filename");
  2366. if (!fs_validate_filename(filename)) {
  2367. res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
  2368. return;
  2369. }
  2370. std::string filepath = params.slot_save_path + filename;
  2371. server_task task;
  2372. task.type = SERVER_TASK_TYPE_SLOT_SAVE;
  2373. task.data = {
  2374. { "id_slot", id_slot },
  2375. { "filename", filename },
  2376. { "filepath", filepath }
  2377. };
  2378. const int id_task = ctx_server.queue_tasks.post(task);
  2379. ctx_server.queue_results.add_waiting_task_id(id_task);
  2380. server_task_result result = ctx_server.queue_results.recv(id_task);
  2381. ctx_server.queue_results.remove_waiting_task_id(id_task);
  2382. if (result.error) {
  2383. res_error(res, result.data);
  2384. } else {
  2385. res.set_content(result.data.dump(), "application/json");
  2386. }
  2387. };
  2388. const auto handle_slots_restore = [&ctx_server, &res_error, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
  2389. json request_data = json::parse(req.body);
  2390. std::string filename = request_data.at("filename");
  2391. if (!fs_validate_filename(filename)) {
  2392. res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
  2393. return;
  2394. }
  2395. std::string filepath = params.slot_save_path + filename;
  2396. server_task task;
  2397. task.type = SERVER_TASK_TYPE_SLOT_RESTORE;
  2398. task.data = {
  2399. { "id_slot", id_slot },
  2400. { "filename", filename },
  2401. { "filepath", filepath }
  2402. };
  2403. const int id_task = ctx_server.queue_tasks.post(task);
  2404. ctx_server.queue_results.add_waiting_task_id(id_task);
  2405. server_task_result result = ctx_server.queue_results.recv(id_task);
  2406. ctx_server.queue_results.remove_waiting_task_id(id_task);
  2407. if (result.error) {
  2408. res_error(res, result.data);
  2409. } else {
  2410. res.set_content(result.data.dump(), "application/json");
  2411. }
  2412. };
  2413. const auto handle_slots_erase = [&ctx_server, &res_error](const httplib::Request & /* req */, httplib::Response & res, int id_slot) {
  2414. server_task task;
  2415. task.type = SERVER_TASK_TYPE_SLOT_ERASE;
  2416. task.data = {
  2417. { "id_slot", id_slot },
  2418. };
  2419. const int id_task = ctx_server.queue_tasks.post(task);
  2420. ctx_server.queue_results.add_waiting_task_id(id_task);
  2421. server_task_result result = ctx_server.queue_results.recv(id_task);
  2422. ctx_server.queue_results.remove_waiting_task_id(id_task);
  2423. if (result.error) {
  2424. res_error(res, result.data);
  2425. } else {
  2426. res.set_content(result.data.dump(), "application/json");
  2427. }
  2428. };
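// dispatch slot actions based on the "action" query parameter; e.g. (illustrative):
//   POST .../slots/0?action=save    with body {"filename": "slot0.bin"}
//   POST .../slots/0?action=restore with body {"filename": "slot0.bin"}
//   POST .../slots/0?action=erase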
  2429. const auto handle_slots_action = [&res_error, &handle_slots_save, &handle_slots_restore, &handle_slots_erase](const httplib::Request & req, httplib::Response & res) {
  2430. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2431. std::string id_slot_str = req.path_params.at("id_slot");
  2432. int id_slot;
  2433. try {
  2434. id_slot = std::stoi(id_slot_str);
  2435. } catch (const std::exception &) {
  2436. res_error(res, format_error_response("Invalid slot ID", ERROR_TYPE_INVALID_REQUEST));
  2437. return;
  2438. }
  2439. std::string action = req.get_param_value("action");
  2440. if (action == "save") {
  2441. handle_slots_save(req, res, id_slot);
  2442. } else if (action == "restore") {
  2443. handle_slots_restore(req, res, id_slot);
  2444. } else if (action == "erase") {
  2445. handle_slots_erase(req, res, id_slot);
  2446. } else {
  2447. res_error(res, format_error_response("Invalid action", ERROR_TYPE_INVALID_REQUEST));
  2448. }
  2449. };
    const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        json data = {
            { "system_prompt",               ctx_server.system_prompt.c_str() },
            { "default_generation_settings", ctx_server.default_generation_settings_for_props },
            { "total_slots",                 ctx_server.params.n_parallel }
        };

        res.set_content(data.dump(), "application/json; charset=utf-8");
    };
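
    // POST /completion(s) - non-streaming requests block until the final result is received;
    // with "stream": true partial results are forwarded as "data: ..." chunks over text/event-stream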
    const auto handle_completions = [&ctx_server, &res_error](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

        json data = json::parse(req.body);

        const int id_task = ctx_server.queue_tasks.get_new_id();

        ctx_server.queue_results.add_waiting_task_id(id_task);
        ctx_server.request_completion(id_task, -1, data, false, false);

        if (!json_value(data, "stream", false)) {
            server_task_result result = ctx_server.queue_results.recv(id_task);
            if (!result.error && result.stop) {
                res.set_content(result.data.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
            } else {
                res_error(res, result.data);
            }

            ctx_server.queue_results.remove_waiting_task_id(id_task);
        } else {
            const auto chunked_content_provider = [id_task, &ctx_server](size_t, httplib::DataSink & sink) {
                while (true) {
                    server_task_result result = ctx_server.queue_results.recv(id_task);
                    if (!result.error) {
                        const std::string str =
                            "data: " +
                            result.data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";

                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });

                        if (!sink.write(str.c_str(), str.size())) {
                            ctx_server.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }

                        if (result.stop) {
                            break;
                        }
                    } else {
                        const std::string str =
                            "error: " +
                            result.data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";

                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });

                        if (!sink.write(str.c_str(), str.size())) {
                            ctx_server.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }

                        break;
                    }
                }

                ctx_server.queue_results.remove_waiting_task_id(id_task);
                sink.done();

                return true;
            };

            auto on_complete = [id_task, &ctx_server] (bool) {
                // cancel
                ctx_server.request_cancel(id_task);
                ctx_server.queue_results.remove_waiting_task_id(id_task);
            };

            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    };
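
    // GET /v1/models - OpenAI-compatible model listing with a single entry built from the model alias and metadata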
    const auto handle_models = [&params, &model_meta](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

        json models = {
            {"object", "list"},
            {"data", {
                 {
                     {"id",       params.model_alias},
                     {"object",   "model"},
                     {"created",  std::time(0)},
                     {"owned_by", "llamacpp"},
                     {"meta",     model_meta}
                 },
             }}
        };

        res.set_content(models.dump(), "application/json; charset=utf-8");
    };
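
    // POST /(v1/)chat/completions - OpenAI-compatible chat endpoint; the request body is converted with
    // oaicompat_completion_params_parse and results are reshaped into OpenAI response/chunk objects
    // example (illustrative): curl http://localhost:8080/v1/chat/completions -d '{"messages": [{"role": "user", "content": "Hello"}]}'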
    const auto handle_chat_completions = [&ctx_server, &params, &res_error](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template);

        const int id_task = ctx_server.queue_tasks.get_new_id();

        ctx_server.queue_results.add_waiting_task_id(id_task);
        ctx_server.request_completion(id_task, -1, data, false, false);

        const auto completion_id = gen_chatcmplid();
        if (!json_value(data, "stream", false)) {
            server_task_result result = ctx_server.queue_results.recv(id_task);

            if (!result.error && result.stop) {
                json result_oai = format_final_response_oaicompat(data, result.data, completion_id);

                res.set_content(result_oai.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
            } else {
                res_error(res, result.data);
            }
            ctx_server.queue_results.remove_waiting_task_id(id_task);
        } else {
            const auto chunked_content_provider = [id_task, &ctx_server, completion_id](size_t, httplib::DataSink & sink) {
                while (true) {
                    server_task_result result = ctx_server.queue_results.recv(id_task);
                    if (!result.error) {
                        std::vector<json> result_array = format_partial_response_oaicompat(result.data, completion_id);

                        for (auto it = result_array.begin(); it != result_array.end(); ++it) {
                            if (!it->empty()) {
                                const std::string str =
                                    "data: " +
                                    it->dump(-1, ' ', false, json::error_handler_t::replace) +
                                    "\n\n";
                                LOG_VERBOSE("data stream", {{"to_send", str}});
                                if (!sink.write(str.c_str(), str.size())) {
                                    ctx_server.queue_results.remove_waiting_task_id(id_task);
                                    return false;
                                }
                            }
                        }
                        if (result.stop) {
                            break;
                        }
                    } else {
                        const std::string str =
                            "error: " +
                            result.data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";
                        LOG_VERBOSE("data stream", {{"to_send", str}});
                        if (!sink.write(str.c_str(), str.size())) {
                            ctx_server.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }
                        break;
                    }
                }
                sink.done();
                ctx_server.queue_results.remove_waiting_task_id(id_task);
                return true;
            };

            auto on_complete = [id_task, &ctx_server](bool) {
                // cancel request
                ctx_server.request_cancel(id_task);
                ctx_server.queue_results.remove_waiting_task_id(id_task);
            };

            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    };
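
    // POST /infill - fill-in-the-middle completion; same queueing and streaming flow as /completion,
    // but request_completion is called with the infill flag set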
    const auto handle_infill = [&ctx_server, &res_error](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        json data = json::parse(req.body);

        const int id_task = ctx_server.queue_tasks.get_new_id();

        ctx_server.queue_results.add_waiting_task_id(id_task);
        ctx_server.request_completion(id_task, -1, data, true, false);

        if (!json_value(data, "stream", false)) {
            server_task_result result = ctx_server.queue_results.recv(id_task);
            if (!result.error && result.stop) {
                res.set_content(result.data.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
            } else {
                res_error(res, result.data);
            }

            ctx_server.queue_results.remove_waiting_task_id(id_task);
        } else {
            const auto chunked_content_provider = [id_task, &ctx_server](size_t, httplib::DataSink & sink) {
                while (true) {
                    server_task_result result = ctx_server.queue_results.recv(id_task);
                    if (!result.error) {
                        const std::string str =
                            "data: " +
                            result.data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";

                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });

                        if (!sink.write(str.c_str(), str.size())) {
                            ctx_server.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }

                        if (result.stop) {
                            break;
                        }
                    } else {
                        break;
                    }
                }

                ctx_server.queue_results.remove_waiting_task_id(id_task);
                sink.done();

                return true;
            };

            auto on_complete = [id_task, &ctx_server] (bool) {
                ctx_server.request_cancel(id_task);
            };

            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    };
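
    // POST /tokenize - return the token ids for "content"; "add_special" controls whether special tokens are added
    // example (illustrative): curl http://localhost:8080/tokenize -d '{"content": "hello", "add_special": true}'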
    const auto handle_tokenize = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);

        std::vector<llama_token> tokens;
        if (body.count("content") != 0) {
            const bool add_special = json_value(body, "add_special", false);
            tokens = ctx_server.tokenize(body.at("content"), add_special);
        }
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
    };
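
    // POST /detokenize - convert a list of token ids ("tokens") back into a string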
    const auto handle_detokenize = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);

        std::string content;
        if (body.count("tokens") != 0) {
            const std::vector<llama_token> tokens = body.at("tokens");
            content = tokens_to_str(ctx_server.ctx, tokens.cbegin(), tokens.cend());
        }

        const json data = format_detokenized_response(content);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
    };
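
    // POST /(v1/)embedding(s) - requires the server to be started with --embeddings;
    // accepts either the OpenAI-style "input" field or the legacy "content" field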
    const auto handle_embeddings = [&params, &ctx_server, &res_error](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        if (!params.embedding) {
            res.status = 501;
            res.set_content("This server does not support embeddings. Start it with `--embeddings`", "text/plain; charset=utf-8");
            return;
        }

        const json body = json::parse(req.body);
        bool is_openai = false;

        // an input prompt can be a string or a list of tokens (integer)
        json prompt;
        if (body.count("input") != 0) {
            is_openai = true;
            prompt = body.at("input");
        } else if (body.count("content") != 0) {
            // with "content", we only support single prompt
            prompt = std::vector<std::string>{body.at("content")};
        } else {
            res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        // create and queue the task
        json responses;
        {
            const int id_task = ctx_server.queue_tasks.get_new_id();
            ctx_server.queue_results.add_waiting_task_id(id_task);
            ctx_server.request_completion(id_task, -1, {{"prompt", prompt}}, false, true);

            // get the result
            server_task_result result = ctx_server.queue_results.recv(id_task);
            ctx_server.queue_results.remove_waiting_task_id(id_task);
            if (!result.error) {
                if (result.data.count("results")) {
                    // result for multi-task
                    responses = result.data.at("results");
                } else {
                    // result for single task
                    responses = std::vector<json>{result.data};
                }
            } else {
                // error received, ignore everything else
                res_error(res, result.data);
                return;
            }
        }

        // write JSON response
        json root = is_openai
            ? format_embeddings_response_oaicompat(body, responses)
            : responses[0];
        return res.set_content(root.dump(), "application/json; charset=utf-8");
    };
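
    // helper producing a GET handler that serves an embedded (compiled-in) asset with the given MIME type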
    auto handle_static_file = [](unsigned char * content, size_t len, const char * mime_type) {
        return [content, len, mime_type](const httplib::Request &, httplib::Response & res) {
            res.set_content(reinterpret_cast<const char*>(content), len, mime_type);
            return false;
        };
    };
    //
    // Router
    //

    // register static assets routes
    if (!params.public_path.empty()) {
        // Set the base directory for serving static files
        svr->set_base_dir(params.public_path);
    }
    // using embedded static files
    svr->Get("/",                           handle_static_file(index_html, index_html_len, "text/html; charset=utf-8"));
    svr->Get("/index.js",                   handle_static_file(index_js, index_js_len, "text/javascript; charset=utf-8"));
    svr->Get("/completion.js",              handle_static_file(completion_js, completion_js_len, "text/javascript; charset=utf-8"));
    svr->Get("/json-schema-to-grammar.mjs", handle_static_file(json_schema_to_grammar_mjs, json_schema_to_grammar_mjs_len, "text/javascript; charset=utf-8"));

    // add new-ui files
    svr->Get("/colorthemes.css",       handle_static_file(colorthemes_css, colorthemes_css_len, "text/css; charset=utf-8"));
    svr->Get("/style.css",             handle_static_file(style_css, style_css_len, "text/css; charset=utf-8"));
    svr->Get("/theme-beeninorder.css", handle_static_file(theme_beeninorder_css, theme_beeninorder_css_len, "text/css; charset=utf-8"));
    svr->Get("/theme-ketivah.css",     handle_static_file(theme_ketivah_css, theme_ketivah_css_len, "text/css; charset=utf-8"));
    svr->Get("/theme-mangotango.css",  handle_static_file(theme_mangotango_css, theme_mangotango_css_len, "text/css; charset=utf-8"));
    svr->Get("/theme-playground.css",  handle_static_file(theme_playground_css, theme_playground_css_len, "text/css; charset=utf-8"));
    svr->Get("/theme-polarnight.css",  handle_static_file(theme_polarnight_css, theme_polarnight_css_len, "text/css; charset=utf-8"));
    svr->Get("/theme-snowstorm.css",   handle_static_file(theme_snowstorm_css, theme_snowstorm_css_len, "text/css; charset=utf-8"));
    svr->Get("/index-new.html",        handle_static_file(index_new_html, index_new_html_len, "text/html; charset=utf-8"));
    svr->Get("/system-prompts.js",     handle_static_file(system_prompts_js, system_prompts_js_len, "text/javascript; charset=utf-8"));
    svr->Get("/prompt-formats.js",     handle_static_file(prompt_formats_js, prompt_formats_js_len, "text/javascript; charset=utf-8"));
    // register API routes
    svr->Get ("/health",              handle_health);
    svr->Get ("/slots",               handle_slots);
    svr->Get ("/metrics",             handle_metrics);
    svr->Get ("/props",               handle_props);
    svr->Get ("/v1/models",           handle_models);
    svr->Post("/completion",          handle_completions); // legacy
    svr->Post("/completions",         handle_completions);
    svr->Post("/v1/completions",      handle_completions);
    svr->Post("/chat/completions",    handle_chat_completions);
    svr->Post("/v1/chat/completions", handle_chat_completions);
    svr->Post("/infill",              handle_infill);
    svr->Post("/embedding",           handle_embeddings); // legacy
    svr->Post("/embeddings",          handle_embeddings);
    svr->Post("/v1/embeddings",       handle_embeddings);
    svr->Post("/tokenize",            handle_tokenize);
    svr->Post("/detokenize",          handle_detokenize);
    if (!params.slot_save_path.empty()) {
        // only enable slot endpoints if slot_save_path is set
        svr->Post("/slots/:id_slot", handle_slots_action);
    }
    //
    // Start the server
    //
    if (params.n_threads_http < 1) {
        // +2 threads for monitoring endpoints
        params.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
    }
    log_data["n_threads_http"] = std::to_string(params.n_threads_http);
    svr->new_task_queue = [&params] { return new httplib::ThreadPool(params.n_threads_http); };

    LOG_INFO("HTTP server listening", log_data);

    // run the HTTP server in a thread - see comment below
    std::thread t([&]() {
        if (!svr->listen_after_bind()) {
            state.store(SERVER_STATE_ERROR);
            return 1;
        }

        return 0;
    });
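
    // wire the task queue callbacks that drive request processing on the main thread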
    ctx_server.queue_tasks.on_new_task(std::bind(
        &server_context::process_single_task, &ctx_server, std::placeholders::_1));
    ctx_server.queue_tasks.on_finish_multitask(std::bind(
        &server_context::on_finish_multitask, &ctx_server, std::placeholders::_1));
    ctx_server.queue_tasks.on_update_slots(std::bind(
        &server_context::update_slots, &ctx_server));
    ctx_server.queue_results.on_multitask_update(std::bind(
        &server_queue::update_multitask,
        &ctx_server.queue_tasks,
        std::placeholders::_1,
        std::placeholders::_2,
        std::placeholders::_3
    ));

    shutdown_handler = [&](int) {
        ctx_server.queue_tasks.terminate();
    };
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset (&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
    sigaction(SIGTERM, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
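
    // this blocks the main thread's task loop until shutdown_handler calls queue_tasks.terminate();
    // afterwards the HTTP server is stopped, its thread joined and the backend freed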
    ctx_server.queue_tasks.start_loop();

    svr->stop();
    t.join();

    llama_backend_free();

    return 0;
}