  1. #include "utils.hpp"
  2. #include "arg.h"
  3. #include "common.h"
  4. #include "log.h"
  5. #include "sampling.h"
  6. #include "json-schema-to-grammar.h"
  7. #include "llama.h"
  8. // Change JSON_ASSERT from assert() to GGML_ASSERT:
  9. #define JSON_ASSERT GGML_ASSERT
  10. #include "json.hpp"
  11. // mime type for sending response
  12. #define MIMETYPE_JSON "application/json; charset=utf-8"
  13. // auto generated files (update with ./deps.sh)
  14. #include "colorthemes.css.hpp"
  15. #include "style.css.hpp"
  16. #include "theme-beeninorder.css.hpp"
  17. #include "theme-ketivah.css.hpp"
  18. #include "theme-mangotango.css.hpp"
  19. #include "theme-playground.css.hpp"
  20. #include "theme-polarnight.css.hpp"
  21. #include "theme-snowstorm.css.hpp"
  22. #include "index.html.hpp"
  23. #include "index-new.html.hpp"
  24. #include "index.js.hpp"
  25. #include "completion.js.hpp"
  26. #include "system-prompts.js.hpp"
  27. #include "prompt-formats.js.hpp"
  28. #include "json-schema-to-grammar.mjs.hpp"
  29. #include "loading.html.hpp"
  30. #include <atomic>
  31. #include <condition_variable>
  32. #include <cstddef>
  33. #include <cinttypes>
  34. #include <deque>
  35. #include <memory>
  36. #include <mutex>
  37. #include <signal.h>
  38. #include <thread>
  39. #include <unordered_map>
  40. #include <unordered_set>
  41. #define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
  42. #define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
  43. #define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
  44. #define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
  45. #define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  46. #define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  47. #define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  48. #define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  49. #define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  50. #define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  51. #define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  52. #define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
  53. using json = nlohmann::ordered_json;
  54. enum stop_type {
  55. STOP_TYPE_FULL,
  56. STOP_TYPE_PARTIAL,
  57. };
  58. // state diagram: https://github.com/ggerganov/llama.cpp/pull/9283
  59. enum slot_state {
  60. SLOT_STATE_IDLE,
  61. SLOT_STATE_PROCESSING_PROMPT,
  62. SLOT_STATE_DONE_PROMPT,
  63. SLOT_STATE_GENERATING,
  64. };
  65. enum server_state {
  66. SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
  67. SERVER_STATE_READY, // Server is ready and model is loaded
  68. };
  69. enum server_task_type {
  70. SERVER_TASK_TYPE_COMPLETION,
  71. SERVER_TASK_TYPE_CANCEL,
  72. SERVER_TASK_TYPE_NEXT_RESPONSE,
  73. SERVER_TASK_TYPE_METRICS,
  74. SERVER_TASK_TYPE_SLOT_SAVE,
  75. SERVER_TASK_TYPE_SLOT_RESTORE,
  76. SERVER_TASK_TYPE_SLOT_ERASE,
  77. SERVER_TASK_TYPE_SET_LORA,
  78. };
  79. enum server_task_cmpl_type {
  80. SERVER_TASK_CMPL_TYPE_NORMAL,
  81. SERVER_TASK_CMPL_TYPE_EMBEDDING,
  82. SERVER_TASK_CMPL_TYPE_RERANK,
  83. SERVER_TASK_CMPL_TYPE_INFILL,
  84. };
  85. struct server_task {
  86. int id = -1; // to be filled by server_queue
  87. int id_target = -1; // used by SERVER_TASK_TYPE_CANCEL
  88. server_task_type type;
  89. json data;
  90. server_task_cmpl_type cmpl_type = SERVER_TASK_CMPL_TYPE_NORMAL;
  91. // utility function
  92. static std::unordered_set<int> get_list_id(const std::vector<server_task> & tasks) {
  93. std::unordered_set<int> ids(tasks.size());
  94. for (size_t i = 0; i < tasks.size(); i++) {
  95. ids.insert(tasks[i].id);
  96. }
  97. return ids;
  98. }
  99. };
  100. struct server_task_result {
  101. int id = -1;
  102. json data;
  103. bool stop;
  104. bool error;
  105. };
  106. struct slot_params {
  107. bool stream = true;
  108. bool cache_prompt = false; // remember the prompt to avoid reprocessing the entire prompt
  109. int32_t n_keep = 0; // number of tokens to keep from initial prompt
  110. int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
  111. int32_t n_predict = -1; // new tokens to predict
  112. std::vector<std::string> antiprompt;
  113. json input_prefix;
  114. json input_suffix;
  115. };
  116. struct server_slot {
  117. int id;
  118. int id_task = -1;
  119. // the index relative to the completion multi-task request
  120. size_t index = 0;
  121. struct slot_params params;
  122. slot_state state = SLOT_STATE_IDLE;
  123. // used to determine the slot that has been used the longest
  124. int64_t t_last_used = -1;
  125. // generation props
  126. int32_t n_ctx = 0; // context size per slot
  127. int32_t n_past = 0;
  128. int32_t n_decoded = 0;
  129. int32_t n_remaining = -1;
  130. int32_t i_batch = -1;
  131. int32_t n_predict = -1; // TODO: disambiguate from params.n_predict
  132. int32_t n_prompt_tokens = 0;
  133. int32_t n_prompt_tokens_processed = 0;
  134. json prompt; // can be either a string, array of strings or array of token ids
  135. // when a task is submitted, we first tokenize the prompt and store it here
  136. std::vector<llama_token> prompt_tokens;
  137. std::string generated_text;
  138. std::vector<llama_token> cache_tokens;
  139. std::vector<completion_token_output> generated_token_probs;
  140. server_task_cmpl_type cmpl_type = SERVER_TASK_CMPL_TYPE_NORMAL;
  141. bool has_next_token = true;
  142. bool truncated = false;
  143. bool stopped_eos = false;
  144. bool stopped_word = false;
  145. bool stopped_limit = false;
  146. bool oaicompat = false;
  147. std::string oaicompat_model;
  148. std::string stopping_word;
  149. // sampling
  150. json json_schema;
  151. struct common_sampler_params sparams;
  152. struct common_sampler * smpl = nullptr;
  153. llama_token sampled;
  154. int32_t ga_i = 0; // group-attention state
  155. int32_t ga_n = 1; // group-attention factor
  156. int32_t ga_w = 512; // group-attention width
  157. int32_t n_past_se = 0; // self-extend
  158. // stats
  159. size_t n_sent_text = 0; // number of characters of generated text already sent
  160. size_t n_sent_token_probs = 0;
  161. int64_t t_start_process_prompt;
  162. int64_t t_start_generation;
  163. double t_prompt_processing; // ms
  164. double t_token_generation; // ms
  165. std::function<void(int)> callback_on_release;
  166. void reset() {
  167. SLT_DBG(*this, "%s", "\n");
  168. n_prompt_tokens = 0;
  169. generated_text = "";
  170. truncated = false;
  171. stopped_eos = false;
  172. stopped_word = false;
  173. stopped_limit = false;
  174. stopping_word = "";
  175. n_past = 0;
  176. n_sent_text = 0;
  177. n_sent_token_probs = 0;
  178. cmpl_type = SERVER_TASK_CMPL_TYPE_NORMAL;
  179. ga_i = 0;
  180. n_past_se = 0;
  181. generated_token_probs.clear();
  182. }
  183. bool has_budget(common_params &global_params) {
  184. if (params.n_predict == -1 && global_params.n_predict == -1) {
  185. return true; // limitless
  186. }
  187. n_remaining = -1;
  188. if (params.n_predict != -1) {
  189. n_remaining = params.n_predict - n_decoded;
  190. } else if (global_params.n_predict != -1) {
  191. n_remaining = global_params.n_predict - n_decoded;
  192. }
  193. return n_remaining > 0; // true if there is still budget left
  194. }
  195. bool is_processing() const {
  196. return state != SLOT_STATE_IDLE;
  197. }
  198. void add_token(const completion_token_output & token) {
  199. if (!is_processing()) {
  200. SLT_WRN(*this, "%s", "slot is not processing\n");
  201. return;
  202. }
  203. generated_token_probs.push_back(token);
  204. }
  205. void release() {
  206. if (is_processing()) {
  207. SLT_INF(*this, "stop processing: n_past = %d, truncated = %d\n", n_past, truncated);
  208. t_token_generation = (ggml_time_us() - t_start_generation) / 1e3;
  209. state = SLOT_STATE_IDLE;
  210. callback_on_release(id);
  211. }
  212. }
  213. json get_formated_timings() const {
  214. return json {
  215. {"prompt_n", n_prompt_tokens_processed},
  216. {"prompt_ms", t_prompt_processing},
  217. {"prompt_per_token_ms", t_prompt_processing / n_prompt_tokens_processed},
  218. {"prompt_per_second", 1e3 / t_prompt_processing * n_prompt_tokens_processed},
  219. {"predicted_n", n_decoded},
  220. {"predicted_ms", t_token_generation},
  221. {"predicted_per_token_ms", t_token_generation / n_decoded},
  222. {"predicted_per_second", 1e3 / t_token_generation * n_decoded},
  223. };
  224. }
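  // scan the generated text for the configured stop strings; returns the position of the earliest match, or std::string::npos if none is found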
  225. size_t find_stopping_strings(const std::string & text, const size_t last_token_size, const stop_type type) {
  226. size_t stop_pos = std::string::npos;
  227. for (const std::string & word : params.antiprompt) {
  228. size_t pos;
  229. if (type == STOP_TYPE_FULL) {
  230. const size_t tmp = word.size() + last_token_size;
  231. const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
  232. pos = text.find(word, from_pos);
  233. } else {
  234. pos = find_partial_stop_string(word, text);
  235. }
  236. if (pos != std::string::npos && (stop_pos == std::string::npos || pos < stop_pos)) {
  237. if (type == STOP_TYPE_FULL) {
  238. stopped_word = true;
  239. stopping_word = word;
  240. has_next_token = false;
  241. }
  242. stop_pos = pos;
  243. }
  244. }
  245. return stop_pos;
  246. }
  247. void print_timings() const {
  248. const double t_prompt = t_prompt_processing / n_prompt_tokens_processed;
  249. const double n_prompt_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed;
  250. const double t_gen = t_token_generation / n_decoded;
  251. const double n_gen_second = 1e3 / t_token_generation * n_decoded;
  252. SLT_INF(*this,
  253. "\n"
  254. "\rprompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n"
  255. "\r eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n"
  256. "\r total time = %10.2f ms / %5d tokens\n",
  257. t_prompt_processing, n_prompt_tokens_processed, t_prompt, n_prompt_second,
  258. t_token_generation, n_decoded, t_gen, n_gen_second,
  259. t_prompt_processing + t_token_generation, n_prompt_tokens_processed + n_decoded);
  260. }
  261. };
  262. struct server_metrics {
  263. int64_t t_start = 0;
  264. uint64_t n_prompt_tokens_processed_total = 0;
  265. uint64_t t_prompt_processing_total = 0;
  266. uint64_t n_tokens_predicted_total = 0;
  267. uint64_t t_tokens_generation_total = 0;
  268. uint64_t n_prompt_tokens_processed = 0;
  269. uint64_t t_prompt_processing = 0;
  270. uint64_t n_tokens_predicted = 0;
  271. uint64_t t_tokens_generation = 0;
  272. uint64_t n_decode_total = 0;
  273. uint64_t n_busy_slots_total = 0;
  274. void init() {
  275. t_start = ggml_time_us();
  276. }
  277. void on_prompt_eval(const server_slot & slot) {
  278. n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed;
  279. n_prompt_tokens_processed += slot.n_prompt_tokens_processed;
  280. t_prompt_processing += slot.t_prompt_processing;
  281. t_prompt_processing_total += slot.t_prompt_processing;
  282. }
  283. void on_prediction(const server_slot & slot) {
  284. n_tokens_predicted_total += slot.n_decoded;
  285. n_tokens_predicted += slot.n_decoded;
  286. t_tokens_generation += slot.t_token_generation;
  287. t_tokens_generation_total += slot.t_token_generation;
  288. }
  289. void on_decoded(const std::vector<server_slot> & slots) {
  290. n_decode_total++;
  291. for (const auto & slot : slots) {
  292. if (slot.is_processing()) {
  293. n_busy_slots_total++;
  294. }
  295. }
  296. }
  297. void reset_bucket() {
  298. n_prompt_tokens_processed = 0;
  299. t_prompt_processing = 0;
  300. n_tokens_predicted = 0;
  301. t_tokens_generation = 0;
  302. }
  303. };
  304. struct server_queue {
  305. int id = 0;
  306. bool running = false;
  307. // queues
  308. std::deque<server_task> queue_tasks;
  309. std::deque<server_task> queue_tasks_deferred;
  310. std::mutex mutex_tasks;
  311. std::condition_variable condition_tasks;
  312. // callback functions
  313. std::function<void(server_task&)> callback_new_task;
  314. std::function<void(void)> callback_update_slots;
  315. // Add a new task to the end of the queue (or to the front, if requested)
  316. int post(server_task task, bool front = false) {
  317. std::unique_lock<std::mutex> lock(mutex_tasks);
  318. if (task.id == -1) {
  319. task.id = id++;
  320. }
  321. QUE_DBG("new task, id = %d, front = %d\n", task.id, front);
  322. if (front) {
  323. queue_tasks.push_front(std::move(task));
  324. } else {
  325. queue_tasks.push_back(std::move(task));
  326. }
  327. condition_tasks.notify_one();
  328. return task.id;
  329. }
  330. // multi-task version of post()
  331. int post(std::vector<server_task> & tasks, bool front = false) {
  332. std::unique_lock<std::mutex> lock(mutex_tasks);
  333. for (auto & task : tasks) {
  334. if (task.id == -1) {
  335. task.id = id++;
  336. }
  337. QUE_DBG("new task, id = %d/%d, front = %d\n", task.id, (int) tasks.size(), front);
  338. if (front) {
  339. queue_tasks.push_front(std::move(task));
  340. } else {
  341. queue_tasks.push_back(std::move(task));
  342. }
  343. }
  344. condition_tasks.notify_one();
  345. return 0;
  346. }
  347. // Add a new task, but defer until one slot is available
  348. void defer(server_task task) {
  349. std::unique_lock<std::mutex> lock(mutex_tasks);
  350. QUE_DBG("defer task, id = %d\n", task.id);
  351. queue_tasks_deferred.push_back(std::move(task));
  352. condition_tasks.notify_one();
  353. }
  354. // Get the next id for creating a new task
  355. int get_new_id() {
  356. std::unique_lock<std::mutex> lock(mutex_tasks);
  357. int new_id = id++;
  358. return new_id;
  359. }
  360. // Register function to process a new task
  361. void on_new_task(std::function<void(server_task &)> callback) {
  362. callback_new_task = std::move(callback);
  363. }
  364. // Register the function to be called when all slots data is ready to be processed
  365. void on_update_slots(std::function<void(void)> callback) {
  366. callback_update_slots = std::move(callback);
  367. }
  368. // Called when the state of a slot changes; moves one task from the deferred queue to the main queue
  369. void pop_deferred_task() {
  370. std::unique_lock<std::mutex> lock(mutex_tasks);
  371. if (!queue_tasks_deferred.empty()) {
  372. queue_tasks.emplace_back(std::move(queue_tasks_deferred.front()));
  373. queue_tasks_deferred.pop_front();
  374. }
  375. condition_tasks.notify_one();
  376. }
  377. // end the start_loop routine
  378. void terminate() {
  379. std::unique_lock<std::mutex> lock(mutex_tasks);
  380. running = false;
  381. condition_tasks.notify_all();
  382. }
  383. /**
  384. * Main loop consists of these steps:
  385. * - Wait until a new task arrives
  386. * - Process the task (i.e. maybe copy data into slot)
  387. * - Check if multitask is finished
  388. * - Update all slots
  389. */
  390. void start_loop() {
  391. running = true;
  392. while (true) {
  393. QUE_DBG("%s", "processing new tasks\n");
  394. while (true) {
  395. std::unique_lock<std::mutex> lock(mutex_tasks);
  396. if (queue_tasks.empty()) {
  397. lock.unlock();
  398. break;
  399. }
  400. server_task task = queue_tasks.front();
  401. queue_tasks.pop_front();
  402. lock.unlock();
  403. QUE_DBG("processing task, id = %d\n", task.id);
  404. callback_new_task(task);
  405. }
  406. // all tasks in the current loop are processed; the slots data is now ready
  407. QUE_DBG("%s", "update slots\n");
  408. callback_update_slots();
  409. QUE_DBG("%s", "waiting for new tasks\n");
  410. {
  411. std::unique_lock<std::mutex> lock(mutex_tasks);
  412. if (queue_tasks.empty()) {
  413. if (!running) {
  414. QUE_DBG("%s", "terminate\n");
  415. return;
  416. }
  417. condition_tasks.wait(lock, [&]{
  418. return (!queue_tasks.empty() || !running);
  419. });
  420. }
  421. }
  422. }
  423. }
  424. };
  425. struct server_response {
  426. // for keeping track of all tasks waiting for the result
  427. std::unordered_set<int> waiting_task_ids;
  428. // the main result queue
  429. std::vector<server_task_result> queue_results;
  430. std::mutex mutex_results;
  431. std::condition_variable condition_results;
  432. // add the id_task to the list of tasks waiting for response
  433. void add_waiting_task_id(int id_task) {
  434. SRV_DBG("add task %d to waiting list. current waiting = %d (before add)\n", id_task, (int) waiting_task_ids.size());
  435. std::unique_lock<std::mutex> lock(mutex_results);
  436. waiting_task_ids.insert(id_task);
  437. }
  438. void add_waiting_tasks(const std::vector<server_task> & tasks) {
  439. std::unique_lock<std::mutex> lock(mutex_results);
  440. for (const auto & task : tasks) {
  441. SRV_DBG("add task %d to waiting list. current waiting = %d (before add)\n", task.id, (int) waiting_task_ids.size());
  442. waiting_task_ids.insert(task.id);
  443. }
  444. }
  445. // when the request is finished, we can remove the task associated with it
  446. void remove_waiting_task_id(int id_task) {
  447. SRV_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size());
  448. std::unique_lock<std::mutex> lock(mutex_results);
  449. waiting_task_ids.erase(id_task);
  450. }
  451. void remove_waiting_task_ids(const std::unordered_set<int> & id_tasks) {
  452. std::unique_lock<std::mutex> lock(mutex_results);
  453. for (const auto & id_task : id_tasks) {
  454. SRV_DBG("remove task %d from waiting list. current waiting = %d (before remove)\n", id_task, (int) waiting_task_ids.size());
  455. waiting_task_ids.erase(id_task);
  456. }
  457. }
  458. // This function blocks the thread until there is a response for one of the id_tasks
  459. server_task_result recv(const std::unordered_set<int> & id_tasks) {
  460. while (true) {
  461. std::unique_lock<std::mutex> lock(mutex_results);
  462. condition_results.wait(lock, [&]{
  463. return !queue_results.empty();
  464. });
  465. for (int i = 0; i < (int) queue_results.size(); i++) {
  466. if (id_tasks.find(queue_results[i].id) != id_tasks.end()) {
  467. server_task_result res = queue_results[i];
  468. queue_results.erase(queue_results.begin() + i);
  469. return res;
  470. }
  471. }
  472. }
  473. // should never reach here
  474. }
  475. // single-task version of recv()
  476. server_task_result recv(int id_task) {
  477. std::unordered_set<int> id_tasks = {id_task};
  478. return recv(id_tasks);
  479. }
  480. // Send a new result to a waiting id_task
  481. void send(server_task_result & result) {
  482. SRV_DBG("sending result for task id = %d\n", result.id);
  483. std::unique_lock<std::mutex> lock(mutex_results);
  484. for (const auto & id_task : waiting_task_ids) {
  485. if (result.id == id_task) {
  486. SRV_DBG("task id = %d moved to result queue\n", result.id);
  487. queue_results.push_back(std::move(result));
  488. condition_results.notify_all();
  489. return;
  490. }
  491. }
  492. }
  493. };
  494. struct server_context {
  495. llama_model * model = nullptr;
  496. llama_context * ctx = nullptr;
  497. std::vector<common_lora_adapter_container> loras;
  498. common_params params;
  499. llama_batch batch = {};
  500. bool clean_kv_cache = true;
  501. bool add_bos_token = true;
  502. bool has_eos_token = false;
  503. int32_t n_ctx; // total context for all clients / slots
  504. // slots / clients
  505. std::vector<server_slot> slots;
  506. json default_generation_settings_for_props;
  507. server_queue queue_tasks;
  508. server_response queue_results;
  509. server_metrics metrics;
  510. // minimum prompt similarity required to select a slot by prompt matching
  511. float slot_prompt_similarity = 0.0f;
  512. ~server_context() {
  513. if (ctx) {
  514. llama_free(ctx);
  515. ctx = nullptr;
  516. }
  517. if (model) {
  518. llama_free_model(model);
  519. model = nullptr;
  520. }
  521. // Clear any sampling context
  522. for (server_slot & slot : slots) {
  523. if (slot.smpl != nullptr) {
  524. common_sampler_free(slot.smpl);
  525. }
  526. }
  527. llama_batch_free(batch);
  528. }
  529. bool load_model(const common_params & params_) {
  530. params = params_;
  531. // reserve one extra sequence (seq_id == 0) for extra features
  532. params.n_parallel += 1;
  533. common_init_result llama_init = common_init_from_params(params);
  534. model = llama_init.model;
  535. ctx = llama_init.context;
  536. loras = llama_init.lora_adapters;
  537. params.n_parallel -= 1; // but be sneaky about it
  538. if (model == nullptr) {
  539. SRV_ERR("failed to load model, '%s'\n", params.model.c_str());
  540. return false;
  541. }
  542. n_ctx = llama_n_ctx(ctx);
  543. add_bos_token = llama_add_bos_token(model);
  544. has_eos_token = !llama_add_eos_token(model);
  545. return true;
  546. }
  547. bool validate_model_chat_template() const {
  548. llama_chat_message chat[] = {{"user", "test"}};
  549. const int res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0);
  550. return res > 0;
  551. }
  552. void init() {
  553. const int32_t n_ctx_slot = n_ctx / params.n_parallel;
  554. SRV_INF("initializing slots, n_slots = %d\n", params.n_parallel);
  555. for (int i = 0; i < params.n_parallel; i++) {
  556. server_slot slot;
  557. slot.id = i;
  558. slot.n_ctx = n_ctx_slot;
  559. slot.n_predict = params.n_predict;
  560. SLT_INF(slot, "new slot n_ctx_slot = %d\n", slot.n_ctx);
  561. const int ga_n = params.grp_attn_n;
  562. const int ga_w = params.grp_attn_w;
  563. if (ga_n != 1) {
  564. GGML_ASSERT(ga_n > 0 && "ga_n must be positive"); // NOLINT
  565. GGML_ASSERT(ga_w % ga_n == 0 && "ga_w must be a multiple of ga_n"); // NOLINT
  566. //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of ga_w"); // NOLINT
  567. //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
  568. SLT_INF(slot, "slot self-extend: ga_n = %d, ga_w = %d\n", ga_n, ga_w);
  569. }
  570. slot.ga_i = 0;
  571. slot.ga_n = ga_n;
  572. slot.ga_w = ga_w;
  573. slot.sparams = params.sparams;
  574. slot.callback_on_release = [this](int) {
  575. queue_tasks.pop_deferred_task();
  576. };
  577. slot.reset();
  578. slots.push_back(slot);
  579. }
  580. default_generation_settings_for_props = get_formated_generation(slots.front());
  581. default_generation_settings_for_props["seed"] = -1;
  582. // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
  583. // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
  584. {
  585. const int32_t n_batch = llama_n_batch(ctx);
  586. // only a single seq_id per token is needed
  587. batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1);
  588. }
  589. metrics.init();
  590. }
  591. std::vector<llama_token> tokenize(const json & json_prompt, bool add_special, bool parse_special) const {
  592. // If `add_special` is true, BOS is only added when json_prompt is a string,
  593. // or for the first element of the json_prompt array when that element is a string.
  594. std::vector<llama_token> prompt_tokens;
  595. if (json_prompt.is_array()) {
  596. bool first = true;
  597. for (const auto & p : json_prompt) {
  598. if (p.is_string()) {
  599. auto s = p.template get<std::string>();
  600. std::vector<llama_token> p;
  601. if (first) {
  602. p = common_tokenize(ctx, s, add_special, parse_special);
  603. first = false;
  604. } else {
  605. p = common_tokenize(ctx, s, false, parse_special);
  606. }
  607. prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
  608. } else {
  609. if (first) {
  610. first = false;
  611. }
  612. prompt_tokens.push_back(p.template get<llama_token>());
  613. }
  614. }
  615. } else {
  616. auto s = json_prompt.template get<std::string>();
  617. prompt_tokens = common_tokenize(ctx, s, add_special, parse_special);
  618. }
  619. return prompt_tokens;
  620. }
  621. server_slot * get_slot_by_id(int id) {
  622. for (server_slot & slot : slots) {
  623. if (slot.id == id) {
  624. return &slot;
  625. }
  626. }
  627. return nullptr;
  628. }
  629. server_slot * get_available_slot(const std::string & prompt) {
  630. server_slot * ret = nullptr;
  631. // find the slot that has at least n% prompt similarity
  632. if (ret == nullptr && slot_prompt_similarity != 0.0f && !prompt.empty()) {
  633. int max_lcp_len = 0;
  634. float similarity = 0;
  635. for (server_slot & slot : slots) {
  636. // skip the slot if it is not available
  637. if (slot.is_processing()) {
  638. continue;
  639. }
  640. // skip the slot if it does not contain a prompt
  641. if (!slot.prompt.is_string()) {
  642. continue;
  643. }
  644. // current slot's prompt
  645. std::string slot_prompt = slot.prompt.get<std::string>();
  646. // length of the current slot's prompt
  647. int slot_prompt_len = slot_prompt.size();
  648. // length of the Longest Common Prefix between the current slot's prompt and the input prompt
  649. int lcp_len = common_part(slot_prompt, prompt);
  650. // fraction of the common prefix length relative to the current slot's prompt length
  651. similarity = static_cast<float>(lcp_len) / slot_prompt_len;
  652. // select the current slot if the criteria match
  653. if (lcp_len > max_lcp_len && similarity > slot_prompt_similarity) {
  654. max_lcp_len = lcp_len;
  655. ret = &slot;
  656. }
  657. }
  658. if (ret != nullptr) {
  659. SLT_DBG(*ret, "selected slot by lcp similarity, max_lcp_len = %d, similarity = %f\n", max_lcp_len, similarity);
  660. }
  661. }
  662. // find the slot that has been least recently used
  663. if (ret == nullptr) {
  664. int64_t t_last = ggml_time_us();
  665. for (server_slot & slot : slots) {
  666. // skip the slot if it is not available
  667. if (slot.is_processing()) {
  668. continue;
  669. }
  670. // select the current slot if the criteria match
  671. if (slot.t_last_used < t_last) {
  672. t_last = slot.t_last_used;
  673. ret = &slot;
  674. }
  675. }
  676. if (ret != nullptr) {
  677. SLT_DBG(*ret, "selected slot by lru, t_last = %" PRId64 "\n", t_last);
  678. }
  679. }
  680. return ret;
  681. }
  682. bool launch_slot_with_task(server_slot & slot, const server_task & task) {
  683. slot_params default_params;
  684. // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them)
  685. auto default_sparams = params.sparams;
  686. const auto & data = task.data;
  687. if (data.count("__oaicompat") != 0) {
  688. slot.oaicompat = true;
  689. slot.oaicompat_model = json_value(data, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
  690. } else {
  691. slot.oaicompat = false;
  692. slot.oaicompat_model = "";
  693. }
  694. slot.params.stream = json_value(data, "stream", false);
  695. slot.params.cache_prompt = json_value(data, "cache_prompt", false);
  696. slot.params.n_predict = json_value(data, "n_predict", json_value(data, "max_tokens", default_params.n_predict));
  697. slot.sparams.top_k = json_value(data, "top_k", default_sparams.top_k);
  698. slot.sparams.top_p = json_value(data, "top_p", default_sparams.top_p);
  699. slot.sparams.min_p = json_value(data, "min_p", default_sparams.min_p);
  700. slot.sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z);
  701. slot.sparams.typ_p = json_value(data, "typical_p", default_sparams.typ_p);
  702. slot.sparams.temp = json_value(data, "temperature", default_sparams.temp);
  703. slot.sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range);
  704. slot.sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent);
  705. slot.sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n);
  706. slot.sparams.penalty_repeat = json_value(data, "repeat_penalty", default_sparams.penalty_repeat);
  707. slot.sparams.penalty_freq = json_value(data, "frequency_penalty", default_sparams.penalty_freq);
  708. slot.sparams.penalty_present = json_value(data, "presence_penalty", default_sparams.penalty_present);
  709. slot.sparams.mirostat = json_value(data, "mirostat", default_sparams.mirostat);
  710. slot.sparams.mirostat_tau = json_value(data, "mirostat_tau", default_sparams.mirostat_tau);
  711. slot.sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
  712. slot.sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
  713. slot.params.n_keep = json_value(data, "n_keep", slot.params.n_keep);
  714. slot.params.n_discard = json_value(data, "n_discard", default_params.n_discard);
  715. slot.sparams.seed = json_value(data, "seed", default_sparams.seed);
  716. slot.sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
  717. slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
  718. // process "json_schema" and "grammar"
  719. if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
  720. send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST);
  721. return false;
  722. }
  723. if (data.contains("json_schema") && !data.contains("grammar")) {
  724. try {
  725. auto schema = json_value(data, "json_schema", json::object());
  726. slot.sparams.grammar = json_schema_to_grammar(schema);
  727. } catch (const std::exception & e) {
  728. send_error(task, std::string("\"json_schema\": ") + e.what(), ERROR_TYPE_INVALID_REQUEST);
  729. return false;
  730. }
  731. } else {
  732. slot.sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
  733. }
  734. if (slot.params.cache_prompt && slot.ga_n != 1) {
  735. slot.params.cache_prompt = false;
  736. SLT_WRN(slot, "%s", "group-attention is not supported with prompt caching. disabling cache\n");
  737. }
  738. if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) {
  739. // Might be better to reject the request with a 400 ?
  740. SLT_WRN(slot, "n_predict = %d exceeds server configuration, setting to %d\n", slot.params.n_predict, slot.n_predict);
  741. slot.params.n_predict = slot.n_predict;
  742. }
  743. // infill
  744. slot.params.input_prefix = json_value(data, "input_prefix", default_params.input_prefix);
  745. slot.params.input_suffix = json_value(data, "input_suffix", default_params.input_suffix);
  746. // get prompt
  747. if (task.cmpl_type != SERVER_TASK_CMPL_TYPE_INFILL) {
  748. const auto & prompt = data.find("prompt");
  749. if (prompt == data.end()) {
  750. send_error(task, "\"prompt\" must be provided", ERROR_TYPE_INVALID_REQUEST);
  751. return false;
  752. }
  753. if ((prompt->is_string()) ||
  754. (prompt->is_array() && prompt->size() == 1 && prompt->at(0).is_string()) ||
  755. (prompt->is_array() && !prompt->empty() && prompt->at(0).is_number_integer())) {
  756. slot.prompt = *prompt;
  757. } else if (prompt->is_array() && prompt->size() == 1 && prompt->at(0).is_array()) {
  758. slot.prompt = prompt->at(0);
  759. } else if (prompt->is_array() && prompt->size() > 1) {
  760. // array of strings
  761. for (const auto & el : *prompt) {
  762. if (!el.is_string()) {
  763. send_error(task, "\"prompt\" must be a string, an array of strings or an array of integers", ERROR_TYPE_INVALID_REQUEST);
  764. return false;
  765. }
  766. }
  767. slot.prompt = *prompt;
  768. } else {
  769. send_error(task, "\"prompt\" must be a string, an array of strings or an array of integers", ERROR_TYPE_INVALID_REQUEST);
  770. return false;
  771. }
  772. }
  773. {
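  // apply "ignore_eos" and any "logit_bias" entries from the request (each entry is a [token id or string, bias or false] pair)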
  774. slot.sparams.logit_bias.clear();
  775. if (json_value(data, "ignore_eos", false) && has_eos_token) {
  776. slot.sparams.logit_bias.push_back({llama_token_eos(model), -INFINITY});
  777. }
  778. const auto & logit_bias = data.find("logit_bias");
  779. if (logit_bias != data.end() && logit_bias->is_array()) {
  780. const int n_vocab = llama_n_vocab(model);
  781. for (const auto & el : *logit_bias) {
  782. // TODO: we may want to throw errors here, in case "el" is incorrect
  783. if (el.is_array() && el.size() == 2) {
  784. float bias;
  785. if (el[1].is_number()) {
  786. bias = el[1].get<float>();
  787. } else if (el[1].is_boolean() && !el[1].get<bool>()) {
  788. bias = -INFINITY;
  789. } else {
  790. continue;
  791. }
  792. if (el[0].is_number_integer()) {
  793. llama_token tok = el[0].get<llama_token>();
  794. if (tok >= 0 && tok < n_vocab) {
  795. slot.sparams.logit_bias.push_back({tok, bias});
  796. }
  797. } else if (el[0].is_string()) {
  798. auto toks = common_tokenize(model, el[0].get<std::string>(), false);
  799. for (auto tok : toks) {
  800. slot.sparams.logit_bias.push_back({tok, bias});
  801. }
  802. }
  803. }
  804. }
  805. }
  806. }
  807. {
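  // collect the optional "stop" strings from the request; they are matched as antiprompts during generation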
  808. slot.params.antiprompt.clear();
  809. const auto & stop = data.find("stop");
  810. if (stop != data.end() && stop->is_array()) {
  811. for (const auto & word : *stop) {
  812. if (!word.empty()) {
  813. slot.params.antiprompt.push_back(word);
  814. }
  815. }
  816. }
  817. }
  818. {
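  // an optional "samplers" array in the request overrides the default sampler chain order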
  819. const auto & samplers = data.find("samplers");
  820. if (samplers != data.end() && samplers->is_array()) {
  821. std::vector<std::string> sampler_names;
  822. for (const auto & name : *samplers) {
  823. if (name.is_string()) {
  824. sampler_names.emplace_back(name);
  825. }
  826. }
  827. slot.sparams.samplers = common_sampler_types_from_names(sampler_names, false);
  828. } else {
  829. slot.sparams.samplers = default_sparams.samplers;
  830. }
  831. }
  832. {
  833. if (slot.smpl != nullptr) {
  834. common_sampler_free(slot.smpl);
  835. }
  836. slot.smpl = common_sampler_init(model, slot.sparams);
  837. if (slot.smpl == nullptr) {
  838. // for now, the only error that may happen here is invalid grammar
  839. send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST);
  840. return false;
  841. }
  842. }
  843. slot.state = SLOT_STATE_PROCESSING_PROMPT;
  844. slot.prompt_tokens.clear();
  845. SLT_INF(slot, "%s", "processing task\n");
  846. return true;
  847. }
  848. void kv_cache_clear() {
  849. SRV_DBG("%s", "clearing KV cache\n");
  850. // clear the entire KV cache
  851. llama_kv_cache_clear(ctx);
  852. clean_kv_cache = false;
  853. }
  854. bool process_token(completion_token_output & result, server_slot & slot) {
  855. // remember which tokens were sampled - used for repetition penalties during sampling
  856. const std::string token_str = common_token_to_piece(ctx, result.tok, params.special);
  857. slot.sampled = result.tok;
  858. // search for stop words and trim them from the generated text
  859. slot.generated_text += token_str;
  860. slot.has_next_token = true;
  861. // check if there is an incomplete UTF-8 character at the end
  862. bool incomplete = false;
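  // walk back over at most 4 trailing bytes to detect a truncated multi-byte UTF-8 sequence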
  863. for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i) {
  864. unsigned char c = slot.generated_text[slot.generated_text.size() - i];
  865. if ((c & 0xC0) == 0x80) {
  866. // continuation byte: 10xxxxxx
  867. continue;
  868. }
  869. if ((c & 0xE0) == 0xC0) {
  870. // 2-byte character: 110xxxxx ...
  871. incomplete = i < 2;
  872. } else if ((c & 0xF0) == 0xE0) {
  873. // 3-byte character: 1110xxxx ...
  874. incomplete = i < 3;
  875. } else if ((c & 0xF8) == 0xF0) {
  876. // 4-byte character: 11110xxx ...
  877. incomplete = i < 4;
  878. }
  879. // else 1-byte character or invalid byte
  880. break;
  881. }
  882. if (!incomplete) {
  883. size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());
  884. const std::string str_test = slot.generated_text.substr(pos);
  885. bool is_stop_full = false;
  886. size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_FULL);
  887. if (stop_pos != std::string::npos) {
  888. is_stop_full = true;
  889. slot.generated_text.erase(
  890. slot.generated_text.begin() + pos + stop_pos,
  891. slot.generated_text.end());
  892. pos = std::min(slot.n_sent_text, slot.generated_text.size());
  893. } else {
  894. is_stop_full = false;
  895. stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_PARTIAL);
  896. }
  897. // check if there is any token to predict
  898. if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0)) {
  899. // do not send the stop word in the response
  900. result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
  901. slot.n_sent_text += result.text_to_send.size();
  902. // add the token to slot queue and cache
  903. }
  904. slot.add_token(result);
  905. if (slot.params.stream) {
  906. send_partial_response(slot, result);
  907. }
  908. }
  909. if (incomplete) {
  910. slot.has_next_token = true;
  911. }
  912. // check the limits
  913. if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params)) {
  914. slot.stopped_limit = true;
  915. slot.has_next_token = false;
  916. SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.params.n_predict);
  917. }
  918. // if context shift is disabled, we stop when it reaches the context limit
  919. if (slot.n_decoded >= slot.n_ctx) {
  920. slot.truncated = true;
  921. slot.stopped_limit = true;
  922. slot.has_next_token = false;
  923. SLT_DBG(slot, "stopped due to running out of context capacity, n_decoded = %d, n_ctx = %d\n", slot.n_decoded, slot.n_ctx);
  924. }
  925. if (llama_token_is_eog(model, result.tok)) {
  926. slot.stopped_eos = true;
  927. slot.has_next_token = false;
  928. SLT_DBG(slot, "%s", "stopped by EOS\n");
  929. }
  930. const auto n_ctx_train = llama_n_ctx_train(model);
  931. if (slot.params.n_predict < 1 && slot.n_predict < 1 && slot.ga_n == 1 && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
  932. slot.truncated = true;
  933. slot.stopped_limit = true;
  934. slot.has_next_token = false; // stop prediction
  935. SLT_WRN(slot,
  936. "n_predict (%d) is not set and self-context extend is disabled. "
  937. "Limiting generated tokens to n_ctx_train (%d) to avoid EOS-less generation infinite loop\n",
  938. slot.params.n_predict, n_ctx_train);
  939. }
  940. SLT_DBG(slot, "n_decoded = %d, n_remaining = %d, next token: %5d '%s'\n", slot.n_decoded, slot.n_remaining, result.tok, token_str.c_str());
  941. return slot.has_next_token; // continue
  942. }
  943. json get_formated_generation(const server_slot & slot) const {
  944. std::vector<std::string> samplers;
  945. samplers.reserve(slot.sparams.samplers.size());
  946. for (const auto & sampler : slot.sparams.samplers) {
  947. samplers.emplace_back(common_sampler_type_to_str(sampler));
  948. }
  949. return json {
  950. {"n_ctx", slot.n_ctx},
  951. {"n_predict", slot.n_predict}, // Server configured n_predict
  952. {"model", params.model_alias},
  953. {"seed", slot.sparams.seed},
  954. {"seed_cur", slot.smpl ? common_sampler_get_seed(slot.smpl) : 0},
  955. {"temperature", slot.sparams.temp},
  956. {"dynatemp_range", slot.sparams.dynatemp_range},
  957. {"dynatemp_exponent", slot.sparams.dynatemp_exponent},
  958. {"top_k", slot.sparams.top_k},
  959. {"top_p", slot.sparams.top_p},
  960. {"min_p", slot.sparams.min_p},
  961. {"tfs_z", slot.sparams.tfs_z},
  962. {"typical_p", slot.sparams.typ_p},
  963. {"repeat_last_n", slot.sparams.penalty_last_n},
  964. {"repeat_penalty", slot.sparams.penalty_repeat},
  965. {"presence_penalty", slot.sparams.penalty_present},
  966. {"frequency_penalty", slot.sparams.penalty_freq},
  967. {"mirostat", slot.sparams.mirostat},
  968. {"mirostat_tau", slot.sparams.mirostat_tau},
  969. {"mirostat_eta", slot.sparams.mirostat_eta},
  970. {"penalize_nl", slot.sparams.penalize_nl},
  971. {"stop", slot.params.antiprompt},
  972. {"max_tokens", slot.params.n_predict}, // User configured n_predict
  973. {"n_keep", slot.params.n_keep},
  974. {"n_discard", slot.params.n_discard},
  975. {"ignore_eos", slot.sparams.ignore_eos},
  976. {"stream", slot.params.stream},
  977. //{"logit_bias", slot.sparams.logit_bias},
  978. {"n_probs", slot.sparams.n_probs},
  979. {"min_keep", slot.sparams.min_keep},
  980. {"grammar", slot.sparams.grammar},
  981. {"samplers", samplers},
  982. };
  983. }
  984. void send_error(const server_task & task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
  985. send_error(task.id, error, type);
  986. }
  987. void send_error(const server_slot & slot, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
  988. send_error(slot.id_task, error, type);
  989. }
  990. void send_error(const int id_task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
  991. SRV_ERR("task id = %d, error: %s\n", id_task, error.c_str());
  992. server_task_result res;
  993. res.id = id_task;
  994. res.stop = false;
  995. res.error = true;
  996. res.data = format_error_response(error, type);
  997. queue_results.send(res);
  998. }
  999. void send_partial_response(server_slot & slot, completion_token_output tkn) {
  1000. server_task_result res;
  1001. res.id = slot.id_task;
  1002. res.error = false;
  1003. res.stop = false;
  1004. res.data = json {
  1005. {"content", tkn.text_to_send},
  1006. {"stop", false},
  1007. {"id_slot", slot.id},
  1008. {"multimodal", false},
  1009. {"index", slot.index},
  1010. };
  1011. if (slot.sparams.n_probs > 0) {
  1012. const std::vector<llama_token> to_send_toks = common_tokenize(ctx, tkn.text_to_send, false);
  1013. const size_t probs_pos = std::min(slot.n_sent_token_probs, slot.generated_token_probs.size());
  1014. const size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());
  1015. std::vector<completion_token_output> probs_output;
  1016. if (probs_pos < probs_stop_pos) {
  1017. probs_output = std::vector<completion_token_output>(
  1018. slot.generated_token_probs.begin() + probs_pos,
  1019. slot.generated_token_probs.begin() + probs_stop_pos);
  1020. }
  1021. slot.n_sent_token_probs = probs_stop_pos;
  1022. res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
  1023. }
  1024. if (slot.oaicompat) {
  1025. res.data["oaicompat_token_ctr"] = slot.n_decoded;
  1026. res.data["model"] = slot.oaicompat_model;
  1027. }
  1028. queue_results.send(res);
  1029. }
  1030. void send_final_response(const server_slot & slot) {
  1031. server_task_result res;
  1032. res.id = slot.id_task;
  1033. res.error = false;
  1034. res.stop = true;
  1035. res.data = json {
  1036. {"content", !slot.params.stream ? slot.generated_text : ""},
  1037. {"id_slot", slot.id},
  1038. {"stop", true},
  1039. {"model", params.model_alias},
  1040. {"tokens_predicted", slot.n_decoded},
  1041. {"tokens_evaluated", slot.n_prompt_tokens},
  1042. {"generation_settings", get_formated_generation(slot)},
  1043. {"prompt", slot.prompt},
  1044. {"truncated", slot.truncated},
  1045. {"stopped_eos", slot.stopped_eos},
  1046. {"stopped_word", slot.stopped_word},
  1047. {"stopped_limit", slot.stopped_limit},
  1048. {"stopping_word", slot.stopping_word},
  1049. {"tokens_cached", slot.n_past},
  1050. {"timings", slot.get_formated_timings()},
  1051. {"index", slot.index},
  1052. };
  1053. if (slot.sparams.n_probs > 0) {
  1054. std::vector<completion_token_output> probs;
  1055. if (!slot.params.stream && slot.stopped_word) {
  1056. const std::vector<llama_token> stop_word_toks = common_tokenize(ctx, slot.stopping_word, false);
  1057. size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
  1058. probs = std::vector<completion_token_output>(
  1059. slot.generated_token_probs.begin(),
  1060. slot.generated_token_probs.end() - safe_offset);
  1061. } else {
  1062. probs = std::vector<completion_token_output>(
  1063. slot.generated_token_probs.begin(),
  1064. slot.generated_token_probs.end());
  1065. }
  1066. res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs);
  1067. }
  1068. if (slot.oaicompat) {
  1069. res.data["oaicompat_token_ctr"] = slot.n_decoded;
  1070. res.data["model"] = slot.oaicompat_model;
  1071. }
  1072. queue_results.send(res);
  1073. }
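// Collects the embedding for the slot's sequence from the decoded batch.
// Sequence-level embeddings are preferred (llama_get_embeddings_seq) with a
// per-token fallback (llama_get_embeddings_ith); if neither is available, a
// zero vector of size n_embd is sent so the client still gets a well-formed result.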
  1074. void send_embedding(const server_slot & slot, const llama_batch & batch) {
  1075. server_task_result res;
  1076. res.id = slot.id_task;
  1077. res.error = false;
  1078. res.stop = true;
  1079. const int n_embd = llama_n_embd(model);
  1080. std::vector<float> embd_res(n_embd, 0.0f);
  1081. for (int i = 0; i < batch.n_tokens; ++i) {
  1082. if (!batch.logits[i] || batch.seq_id[i][0] != slot.id + 1) {
  1083. continue;
  1084. }
  1085. const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
  1086. if (embd == NULL) {
  1087. embd = llama_get_embeddings_ith(ctx, i);
  1088. }
  1089. if (embd == NULL) {
  1090. SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]);
  1091. res.data = json {
  1092. {"embedding", std::vector<float>(n_embd, 0.0f)},
  1093. {"index", slot.index},
  1094. };
  1095. continue;
  1096. }
  1097. common_embd_normalize(embd, embd_res.data(), n_embd);
  1098. res.data = json {
  1099. {"embedding", embd_res},
  1100. {"index", slot.index},
  1101. };
  1102. }
  1103. SLT_DBG(slot, "%s", "sending embeddings\n");
  1104. queue_results.send(res);
  1105. }
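// Same batch scan as send_embedding, but for rerank requests: the relevance
// score is read from the first element of the returned embedding, and -1e6 is
// used as a sentinel score when the embedding cannot be retrieved.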
  1106. void send_rerank(const server_slot & slot, const llama_batch & batch) {
  1107. server_task_result res;
  1108. res.id = slot.id_task;
  1109. res.error = false;
  1110. res.stop = true;
  1111. for (int i = 0; i < batch.n_tokens; ++i) {
  1112. if (!batch.logits[i] || batch.seq_id[i][0] != slot.id + 1) {
  1113. continue;
  1114. }
  1115. const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
  1116. if (embd == NULL) {
  1117. embd = llama_get_embeddings_ith(ctx, i);
  1118. }
  1119. if (embd == NULL) {
  1120. SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]);
  1121. res.data = json {
  1122. {"index", slot.index},
  1123. {"score", -1e6},
  1124. };
  1125. continue;
  1126. }
  1127. res.data = json {
  1128. {"index", slot.index},
  1129. {"score", embd[0]},
  1130. };
  1131. }
  1132. SLT_DBG(slot, "sending rerank result, res = '%s'\n", res.data.dump().c_str());
  1133. queue_results.send(res);
  1134. }
  1135. //
  1136. // Functions to create new task(s) and receive result(s)
  1137. //
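// The "prompt" field may take several shapes (illustrative examples):
//   "prompt": "hello"                      -> single task from a string
//   "prompt": [1, 2, 3]                    -> single task from token ids
//   "prompt": ["hello", "world"]           -> one task per prompt
//   "prompt": ["query", "doc A", "doc B"]  -> rerank: query followed by documents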
  1138. std::vector<server_task> create_tasks_cmpl(json data, server_task_cmpl_type cmpl_type) {
  1139. std::vector<server_task> tasks;
  1140. auto create_task = [&](json & task_data, bool replace_prompt, json prompt) {
  1141. server_task task;
  1142. task.id = queue_tasks.get_new_id();
  1143. task.cmpl_type = cmpl_type;
  1144. task.type = SERVER_TASK_TYPE_COMPLETION;
  1145. if (replace_prompt) {
  1146. task.data = task_data;
  1147. task.data["prompt"] = std::move(prompt);
  1148. } else {
  1149. task.data = std::move(task_data);
  1150. }
  1151. tasks.push_back(std::move(task));
  1152. };
  1153. static constexpr const char * error_msg = "\"prompt\" must be a string, an array of token ids or an array of prompts";
  1154. if (!data.contains("prompt")) {
  1155. throw std::runtime_error(error_msg);
  1156. }
  1157. json prompt = data.at("prompt");
1158. // if the prompt is a singleton (i.e. a string or a list of tokens), we only need to create a single task
  1159. if (prompt.is_string() || json_is_array_of_numbers(prompt)) {
  1160. data["index"] = 0;
  1161. create_task(data, false, nullptr);
  1162. } else if (prompt.is_array()) {
1163. // otherwise, it's a multiple-prompt task, so we break it into smaller tasks
  1164. std::vector<json> prompts = prompt;
  1165. if (cmpl_type == SERVER_TASK_CMPL_TYPE_RERANK) {
  1166. // prompts[0] is the question
  1167. // the rest are the answers/documents
  1168. SRV_DBG("creating rerank tasks, n_prompts = %d\n", (int) prompts.size() - 1);
  1169. for (size_t i = 1; i < prompts.size(); i++) {
  1170. json qd;
  1171. qd.push_back(prompts[0]);
  1172. qd.push_back(prompts[i]);
  1173. data["index"] = i - 1;
  1174. create_task(data, true, qd);
  1175. }
  1176. } else {
  1177. SRV_DBG("creating multi-prompt tasks, n_prompts = %d\n", (int) prompts.size());
  1178. for (size_t i = 0; i < prompts.size(); i++) {
  1179. const auto & e = prompts[i];
  1180. if (e.is_string() || json_is_array_of_numbers(e)) {
  1181. data["index"] = i;
  1182. create_task(data, true, e);
  1183. } else {
  1184. throw std::runtime_error(error_msg);
  1185. }
  1186. }
  1187. }
  1188. } else {
  1189. // invalid case
  1190. throw std::runtime_error(error_msg);
  1191. }
  1192. return tasks;
  1193. }
  1194. void cancel_tasks(const std::unordered_set<int> & id_tasks) {
  1195. std::vector<server_task> cancel_tasks;
  1196. cancel_tasks.reserve(id_tasks.size());
  1197. for (const auto & id_task : id_tasks) {
  1198. SRV_WRN("cancel task, id_task = %d\n", id_task);
  1199. server_task task;
  1200. task.type = SERVER_TASK_TYPE_CANCEL;
  1201. task.id_target = id_task;
  1202. cancel_tasks.push_back(task);
  1203. queue_results.remove_waiting_task_id(id_task);
  1204. }
  1205. // push to beginning of the queue, so it has highest priority
  1206. queue_tasks.post(cancel_tasks, true);
  1207. }
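// Blocks until one result has been received for every id in id_tasks, placing
// each result at the position given by its "index" field. On the first error,
// the remaining tasks are cancelled and only the error handler is invoked.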
  1208. // receive the results from task(s) created by create_tasks_cmpl
  1209. void receive_cmpl_results(
  1210. const std::unordered_set<int> & id_tasks,
  1211. const std::function<void(std::vector<server_task_result>&)> & result_handler,
  1212. const std::function<void(json)> & error_handler) {
  1213. // TODO: currently, there is no way to detect the client has cancelled the request
  1214. std::vector<server_task_result> results(id_tasks.size());
  1215. for (size_t i = 0; i < id_tasks.size(); i++) {
  1216. server_task_result result = queue_results.recv(id_tasks);
  1217. if (result.error) {
  1218. error_handler(result.data);
  1219. cancel_tasks(id_tasks);
  1220. return;
  1221. }
  1222. const size_t idx = result.data["index"];
  1223. GGML_ASSERT(idx < results.size() && "index out of range");
  1224. results[idx] = result;
  1225. }
  1226. result_handler(results);
  1227. }
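// Streaming variant: forwards every result to result_handler as it arrives and
// stops when the handler returns false (e.g. the connection was closed), an
// error is received, or all tasks have reported their final (stop) result.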
  1228. // receive the results from task(s) created by create_tasks_cmpl, in stream mode
  1229. void receive_cmpl_results_stream(
1230. const std::unordered_set<int> & id_tasks,
1231. const std::function<bool(server_task_result&)> & result_handler,
1232. const std::function<void(json)> & error_handler) {
  1233. size_t n_finished = 0;
  1234. while (true) {
  1235. server_task_result result = queue_results.recv(id_tasks);
  1236. if (!result_handler(result)) {
  1237. cancel_tasks(id_tasks);
  1238. break;
  1239. }
  1240. if (result.error) {
  1241. error_handler(result.data);
  1242. cancel_tasks(id_tasks);
  1243. break;
  1244. }
  1245. if (result.stop) {
  1246. if (++n_finished == id_tasks.size()) {
  1247. break;
  1248. }
  1249. }
  1250. }
  1251. }
  1252. //
  1253. // Functions to process the task
  1254. //
  1255. void process_single_task(const server_task & task) {
  1256. switch (task.type) {
  1257. case SERVER_TASK_TYPE_COMPLETION:
  1258. {
  1259. const int id_slot = json_value(task.data, "id_slot", -1);
  1260. server_slot * slot;
  1261. if (id_slot != -1) {
  1262. slot = get_slot_by_id(id_slot);
  1263. } else {
  1264. std::string prompt;
  1265. if (task.data.contains("prompt") && task.data.at("prompt").is_string()) {
  1266. prompt = json_value(task.data, "prompt", std::string());
  1267. }
  1268. slot = get_available_slot(prompt);
  1269. }
  1270. if (slot == nullptr) {
  1271. // if no slot is available, we defer this task for processing later
  1272. SRV_DBG("no slot is available, defer task, id_task = %d\n", task.id);
  1273. queue_tasks.defer(task);
  1274. break;
  1275. }
  1276. if (slot->is_processing()) {
  1277. // if requested slot is unavailable, we defer this task for processing later
  1278. SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
  1279. queue_tasks.defer(task);
  1280. break;
  1281. }
  1282. slot->reset();
  1283. slot->id_task = task.id;
  1284. slot->cmpl_type = task.cmpl_type;
  1285. slot->index = json_value(task.data, "index", 0);
  1286. if (!launch_slot_with_task(*slot, task)) {
  1287. SRV_ERR("failed to launch slot with task, id_task = %d\n", task.id);
  1288. break;
  1289. }
  1290. } break;
  1291. case SERVER_TASK_TYPE_CANCEL:
  1292. {
  1293. // release slot linked with the task id
  1294. for (auto & slot : slots) {
  1295. if (slot.id_task == task.id_target) {
  1296. slot.release();
  1297. break;
  1298. }
  1299. }
  1300. } break;
  1301. case SERVER_TASK_TYPE_NEXT_RESPONSE:
  1302. {
  1303. // do nothing
  1304. } break;
  1305. case SERVER_TASK_TYPE_METRICS:
  1306. {
  1307. json slots_data = json::array();
  1308. int n_idle_slots = 0;
  1309. int n_processing_slots = 0;
  1310. for (server_slot & slot : slots) {
  1311. json slot_data = get_formated_generation(slot);
  1312. slot_data["id"] = slot.id;
  1313. slot_data["id_task"] = slot.id_task;
  1314. slot_data["state"] = slot.state;
  1315. slot_data["prompt"] = slot.prompt;
  1316. slot_data["next_token"] = {
  1317. {"has_next_token", slot.has_next_token},
  1318. {"n_remain", slot.n_remaining},
  1319. {"n_decoded", slot.n_decoded},
  1320. {"stopped_eos", slot.stopped_eos},
  1321. {"stopped_word", slot.stopped_word},
  1322. {"stopped_limit", slot.stopped_limit},
  1323. {"stopping_word", slot.stopping_word},
  1324. };
  1325. if (slot_data["state"] == SLOT_STATE_IDLE) {
  1326. n_idle_slots++;
  1327. } else {
  1328. n_processing_slots++;
  1329. }
  1330. slots_data.push_back(slot_data);
  1331. }
  1332. SRV_DBG("n_idle_slots = %d, n_processing_slots = %d\n", n_idle_slots, n_processing_slots);
  1333. server_task_result res;
  1334. res.id = task.id;
  1335. res.stop = true;
  1336. res.error = false;
  1337. res.data = {
  1338. { "idle", n_idle_slots },
  1339. { "processing", n_processing_slots },
  1340. { "deferred", queue_tasks.queue_tasks_deferred.size() },
  1341. { "t_start", metrics.t_start},
  1342. { "n_prompt_tokens_processed_total", metrics.n_prompt_tokens_processed_total},
  1343. { "t_tokens_generation_total", metrics.t_tokens_generation_total},
  1344. { "n_tokens_predicted_total", metrics.n_tokens_predicted_total},
  1345. { "t_prompt_processing_total", metrics.t_prompt_processing_total},
  1346. { "n_prompt_tokens_processed", metrics.n_prompt_tokens_processed},
  1347. { "t_prompt_processing", metrics.t_prompt_processing},
  1348. { "n_tokens_predicted", metrics.n_tokens_predicted},
  1349. { "t_tokens_generation", metrics.t_tokens_generation},
  1350. { "n_decode_total", metrics.n_decode_total},
  1351. { "n_busy_slots_total", metrics.n_busy_slots_total},
  1352. { "kv_cache_tokens_count", llama_get_kv_cache_token_count(ctx)},
  1353. { "kv_cache_used_cells", llama_get_kv_cache_used_cells(ctx)},
  1354. { "slots", slots_data },
  1355. };
  1356. if (json_value(task.data, "reset_bucket", false)) {
  1357. metrics.reset_bucket();
  1358. }
  1359. queue_results.send(res);
  1360. } break;
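// Persist the KV-cache state of one slot to disk. task.data is expected to
// carry "id_slot", "filename" and "filepath" (illustrative example:
// {"id_slot": 0, "filename": "state.bin", "filepath": "/path/state.bin"}).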
  1361. case SERVER_TASK_TYPE_SLOT_SAVE:
  1362. {
  1363. int id_slot = task.data.at("id_slot");
  1364. server_slot * slot = get_slot_by_id(id_slot);
  1365. if (slot == nullptr) {
  1366. send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
  1367. break;
  1368. }
  1369. if (slot->is_processing()) {
  1370. // if requested slot is unavailable, we defer this task for processing later
  1371. SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
  1372. queue_tasks.defer(task);
  1373. break;
  1374. }
  1375. const size_t token_count = slot->cache_tokens.size();
  1376. const int64_t t_start = ggml_time_us();
  1377. std::string filename = task.data.at("filename");
  1378. std::string filepath = task.data.at("filepath");
  1379. const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), token_count);
  1380. const int64_t t_end = ggml_time_us();
  1381. const double t_save_ms = (t_end - t_start) / 1000.0;
  1382. server_task_result result;
  1383. result.id = task.id;
  1384. result.stop = true;
  1385. result.error = false;
  1386. result.data = json {
  1387. { "id_slot", id_slot },
  1388. { "filename", filename },
  1389. { "n_saved", token_count }, // tokens saved
  1390. { "n_written", nwrite }, // bytes written
  1391. { "timings", {
  1392. { "save_ms", t_save_ms }
  1393. } }
  1394. };
  1395. queue_results.send(result);
  1396. } break;
  1397. case SERVER_TASK_TYPE_SLOT_RESTORE:
  1398. {
  1399. int id_slot = task.data.at("id_slot");
  1400. server_slot * slot = get_slot_by_id(id_slot);
  1401. if (slot == nullptr) {
  1402. send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
  1403. break;
  1404. }
  1405. if (slot->is_processing()) {
  1406. // if requested slot is unavailable, we defer this task for processing later
  1407. SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
  1408. queue_tasks.defer(task);
  1409. break;
  1410. }
  1411. const int64_t t_start = ggml_time_us();
  1412. std::string filename = task.data.at("filename");
  1413. std::string filepath = task.data.at("filepath");
  1414. slot->cache_tokens.resize(slot->n_ctx);
  1415. size_t token_count = 0;
  1416. size_t nread = llama_state_seq_load_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), slot->cache_tokens.size(), &token_count);
  1417. if (nread == 0) {
  1418. slot->cache_tokens.resize(0);
  1419. send_error(task, "Unable to restore slot, no available space in KV cache or invalid slot save file", ERROR_TYPE_INVALID_REQUEST);
  1420. break;
  1421. }
  1422. slot->cache_tokens.resize(token_count);
  1423. // TODO: maybe detokenize the slot->cache_tokens instead?
  1424. slot->prompt = string_format("[restored %d tokens from file]", (int) token_count);
  1425. const int64_t t_end = ggml_time_us();
  1426. const double t_restore_ms = (t_end - t_start) / 1000.0;
  1427. server_task_result result;
  1428. result.id = task.id;
  1429. result.stop = true;
  1430. result.error = false;
  1431. result.data = json {
  1432. { "id_slot", id_slot },
  1433. { "filename", filename },
  1434. { "n_restored", token_count }, // tokens restored
  1435. { "n_read", nread }, // bytes read
  1436. { "timings", {
  1437. { "restore_ms", t_restore_ms }
  1438. } }
  1439. };
  1440. queue_results.send(result);
  1441. } break;
  1442. case SERVER_TASK_TYPE_SLOT_ERASE:
  1443. {
  1444. int id_slot = task.data.at("id_slot");
  1445. server_slot * slot = get_slot_by_id(id_slot);
  1446. if (slot == nullptr) {
  1447. send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
  1448. break;
  1449. }
  1450. if (slot->is_processing()) {
  1451. // if requested slot is unavailable, we defer this task for processing later
  1452. SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
  1453. queue_tasks.defer(task);
  1454. break;
  1455. }
  1456. // Erase token cache
  1457. const size_t n_erased = slot->cache_tokens.size();
  1458. llama_kv_cache_seq_rm(ctx, slot->id + 1, -1, -1);
  1459. slot->cache_tokens.clear();
  1460. server_task_result result;
  1461. result.id = task.id;
  1462. result.stop = true;
  1463. result.error = false;
  1464. result.data = json {
  1465. { "id_slot", id_slot },
  1466. { "n_erased", n_erased }
  1467. };
  1468. queue_results.send(result);
  1469. } break;
  1470. case SERVER_TASK_TYPE_SET_LORA:
  1471. {
  1472. common_lora_adapters_apply(ctx, loras);
  1473. server_task_result result;
  1474. result.id = task.id;
  1475. result.stop = true;
  1476. result.error = false;
  1477. result.data = json{{ "success", true }};
  1478. queue_results.send(result);
  1479. } break;
  1480. }
  1481. }
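// Main per-iteration slot update, roughly in this order:
//   1. return early if every slot is idle (optionally clearing the KV cache)
//   2. post a NEXT_RESPONSE task so the loop keeps running
//   3. apply context shift to slots that are about to exceed their context
//   4. add the last sampled token of each generating slot to the batch
//   5. tokenize pending prompts and fill the remaining batch space
//   6. decode the batch in chunks of n_batch and sample new tokens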
  1482. void update_slots() {
  1483. // check if all slots are idle
  1484. {
  1485. bool all_idle = true;
  1486. for (auto & slot : slots) {
  1487. if (slot.is_processing()) {
  1488. all_idle = false;
  1489. break;
  1490. }
  1491. }
  1492. if (all_idle) {
  1493. SRV_INF("%s", "all slots are idle\n");
  1494. if (clean_kv_cache) {
  1495. kv_cache_clear();
  1496. }
  1497. return;
  1498. }
  1499. }
  1500. {
  1501. SRV_DBG("%s", "posting NEXT_RESPONSE\n");
  1502. server_task task;
  1503. task.type = SERVER_TASK_TYPE_NEXT_RESPONSE;
  1504. task.id_target = -1;
  1505. queue_tasks.post(task);
  1506. }
  1507. // apply context-shift if needed
  1508. // TODO: simplify and improve
  1509. for (server_slot & slot : slots) {
  1510. if (slot.ga_n == 1) {
  1511. if (slot.is_processing() && slot.n_past >= slot.n_ctx - 1) {
  1512. if (!params.ctx_shift) {
1513. // this check is redundant (for good measure)
1514. // we should never get here, because generation should have already stopped in process_token()
  1515. slot.release();
  1516. send_error(slot, "context shift is disabled", ERROR_TYPE_SERVER);
  1517. continue;
  1518. }
  1519. // Shift context
  1520. const int n_keep = slot.params.n_keep + add_bos_token;
  1521. const int n_left = slot.n_past - n_keep;
  1522. const int n_discard = slot.params.n_discard ? slot.params.n_discard : (n_left / 2);
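// illustrative example: with n_keep = 256, n_past = 4096 and n_discard unset,
// n_left = 3840 and n_discard = 1920, i.e. the oldest half of the non-kept
// context is dropped and the remaining tokens are shifted down by 1920 positions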
  1523. SLT_WRN(slot, "slot context shift, n_keep = %d, n_left = %d, n_discard = %d\n", n_keep, n_left, n_discard);
  1524. llama_kv_cache_seq_rm (ctx, slot.id + 1, n_keep , n_keep + n_discard);
  1525. llama_kv_cache_seq_add(ctx, slot.id + 1, n_keep + n_discard, slot.n_past, -n_discard);
  1526. if (slot.params.cache_prompt) {
  1527. for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++) {
  1528. slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
  1529. }
  1530. slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);
  1531. }
  1532. slot.n_past -= n_discard;
  1533. slot.truncated = true;
  1534. }
  1535. }
  1536. }
  1537. // start populating the batch for this iteration
  1538. common_batch_clear(batch);
1539. // first, add sampled tokens from any ongoing sequences
  1540. for (auto & slot : slots) {
  1541. if (slot.state != SLOT_STATE_GENERATING) {
  1542. continue;
  1543. }
  1544. slot.i_batch = batch.n_tokens;
  1545. const int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1546. common_batch_add(batch, slot.sampled, slot_npast, { slot.id + 1 }, true);
  1547. slot.n_past += 1;
  1548. if (slot.params.cache_prompt) {
  1549. slot.cache_tokens.push_back(slot.sampled);
  1550. }
  1551. SLT_DBG(slot, "slot decode token, n_ctx = %d, n_past = %d, n_cache_tokens = %d, truncated = %d\n",
  1552. slot.n_ctx, slot.n_past, (int) slot.cache_tokens.size(), slot.truncated);
  1553. }
  1554. // process in chunks of params.n_batch
  1555. int32_t n_batch = llama_n_batch(ctx);
  1556. int32_t n_ubatch = llama_n_ubatch(ctx);
  1557. // track if this is an embedding or non-embedding batch
  1558. // if we've added sampled tokens above, we are in non-embedding mode
  1559. // -1: none, 0: non-embedding, 1: embedding
  1560. // TODO: make enum
  1561. int32_t batch_type = batch.n_tokens > 0 ? 0 : -1;
  1562. // next, batch any pending prompts without exceeding n_batch
  1563. if (params.cont_batching || batch.n_tokens == 0) {
  1564. for (auto & slot : slots) {
  1565. // this slot still has a prompt to be processed
  1566. if (slot.state == SLOT_STATE_PROCESSING_PROMPT) {
  1567. auto & prompt_tokens = slot.prompt_tokens;
  1568. // we haven't tokenized the prompt yet - do it now:
  1569. if (prompt_tokens.empty()) {
  1570. SLT_INF(slot, "tokenizing prompt, len = %d\n", (int) slot.prompt.size());
  1571. slot.t_start_process_prompt = ggml_time_us();
  1572. slot.t_start_generation = 0;
  1573. switch (slot.cmpl_type) {
  1574. case SERVER_TASK_CMPL_TYPE_NORMAL:
  1575. case SERVER_TASK_CMPL_TYPE_EMBEDDING:
  1576. {
  1577. prompt_tokens = tokenize(slot.prompt, llama_add_bos_token(model), true);
  1578. } break;
  1579. case SERVER_TASK_CMPL_TYPE_RERANK:
  1580. {
  1581. // require slot.prompt to be array of 2 strings
  1582. if (!slot.prompt.is_array() || slot.prompt.size() != 2) {
  1583. SLT_ERR(slot, "%s", "invalid prompt for rerank task\n");
  1584. slot.release();
  1585. send_error(slot, "invalid prompt for rerank task", ERROR_TYPE_INVALID_REQUEST);
  1586. continue;
  1587. }
  1588. // prompt: [BOS]query[EOS][SEP]doc[EOS]
  1589. prompt_tokens.clear();
  1590. prompt_tokens.push_back(llama_token_bos(model));
  1591. {
  1592. const auto part = tokenize(slot.prompt[0], false, false);
  1593. prompt_tokens.insert(prompt_tokens.end(), part.begin(), part.end());
  1594. }
  1595. prompt_tokens.push_back(llama_token_eos(model));
  1596. prompt_tokens.push_back(llama_token_sep(model));
  1597. {
  1598. const auto part = tokenize(slot.prompt[1], false, false);
  1599. prompt_tokens.insert(prompt_tokens.end(), part.begin(), part.end());
  1600. }
  1601. prompt_tokens.push_back(llama_token_eos(model));
  1602. } break;
  1603. case SERVER_TASK_CMPL_TYPE_INFILL:
  1604. {
  1605. auto prefix_tokens = tokenize(slot.params.input_prefix, false, false);
  1606. auto suffix_tokens = tokenize(slot.params.input_suffix, false, false);
  1607. prefix_tokens.insert(prefix_tokens.begin(), llama_token_fim_pre(model));
  1608. suffix_tokens.insert(suffix_tokens.begin(), llama_token_fim_suf(model));
  1609. auto embd_inp = params.spm_infill ? suffix_tokens : prefix_tokens;
  1610. auto embd_end = params.spm_infill ? prefix_tokens : suffix_tokens;
  1611. if (llama_add_bos_token(model)) {
  1612. embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
  1613. }
  1614. embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
  1615. embd_inp.push_back(llama_token_fim_mid(model));
  1616. prompt_tokens = std::move(embd_inp);
  1617. } break;
  1618. }
  1619. slot.n_past = 0;
  1620. slot.n_prompt_tokens = prompt_tokens.size();
  1621. SLT_INF(slot, "prompt tokenized, n_ctx_slot = %d, n_keep = %d, n_prompt_tokens = %d\n", slot.n_ctx, slot.params.n_keep, slot.n_prompt_tokens);
  1622. // print prompt tokens:
  1623. for (int i = 0; i < (int) prompt_tokens.size(); i++) {
  1624. SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
  1625. }
  1626. // empty prompt passed -> release the slot and send empty response
  1627. if (prompt_tokens.empty()) {
  1628. SLT_WRN(slot, "%s", "empty prompt - releasing slot\n");
  1629. slot.release();
  1630. slot.print_timings();
  1631. send_final_response(slot);
  1632. continue;
  1633. }
  1634. if (slot.cmpl_type == SERVER_TASK_CMPL_TYPE_EMBEDDING || slot.cmpl_type == SERVER_TASK_CMPL_TYPE_RERANK) {
  1635. // this prompt is too large to process - discard it
  1636. if (slot.n_prompt_tokens > n_ubatch) {
  1637. slot.release();
  1638. send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
  1639. continue;
  1640. }
  1641. } else {
  1642. if (!params.ctx_shift) {
  1643. // if context shift is disabled, we make sure prompt size is smaller than KV size
  1644. if (slot.n_prompt_tokens >= slot.n_ctx) {
  1645. slot.release();
  1646. send_error(slot, "the request exceeds the available context size. try increasing the context size or enable context shift", ERROR_TYPE_INVALID_REQUEST);
  1647. continue;
  1648. }
  1649. }
  1650. if (slot.params.n_keep < 0) {
  1651. slot.params.n_keep = slot.n_prompt_tokens;
  1652. }
  1653. slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);
  1654. // if input prompt is too big, truncate it (if group attention self-extend is disabled)
  1655. if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx) {
  1656. const int n_left = slot.n_ctx - slot.params.n_keep;
  1657. const int n_block_size = n_left / 2;
  1658. const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
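// illustrative example: n_ctx = 2048, n_keep = 128, n_prompt_tokens = 4096
// -> n_left = 1920, n_block_size = 960, erased_blocks = 3, so the truncated
//    prompt keeps the first 128 tokens plus the final 1088 tokens (1216 total)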
  1659. std::vector<llama_token> new_tokens(
  1660. prompt_tokens.begin(),
  1661. prompt_tokens.begin() + slot.params.n_keep);
  1662. new_tokens.insert(
  1663. new_tokens.end(),
  1664. prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
  1665. prompt_tokens.end());
  1666. prompt_tokens = std::move(new_tokens);
  1667. slot.truncated = true;
  1668. slot.n_prompt_tokens = prompt_tokens.size();
  1669. SLT_WRN(slot, "input truncated, n_ctx = %d, n_keep = %d, n_left = %d, n_prompt_tokens = %d\n", slot.n_ctx, slot.params.n_keep, n_left, slot.n_prompt_tokens);
  1670. GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
  1671. }
  1672. common_sampler_reset(slot.smpl);
  1673. if (!slot.params.cache_prompt) {
  1674. slot.n_past_se = 0;
  1675. slot.ga_i = 0;
  1676. } else {
  1677. GGML_ASSERT(slot.ga_n == 1);
  1678. // reuse any previously computed tokens that are common with the new prompt
  1679. slot.n_past = common_part(slot.cache_tokens, prompt_tokens);
  1680. // push the prompt into the sampling context (do not apply grammar)
  1681. for (int i = 0; i < slot.n_past; ++i) {
  1682. common_sampler_accept(slot.smpl, slot.cache_tokens[i], false);
  1683. }
  1684. }
  1685. }
  1686. if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0) {
  1687. // we have to evaluate at least 1 token to generate logits.
  1688. SLT_WRN(slot, "need to evaluate at least 1 token to generate logits, n_past = %d, n_prompt_tokens = %d\n", slot.n_past, slot.n_prompt_tokens);
  1689. slot.n_past--;
  1690. if (slot.ga_i > 0) {
  1691. slot.n_past_se--;
  1692. }
  1693. }
  1694. slot.n_prompt_tokens_processed = 0;
  1695. }
1696. // non-causal tasks require the entire prompt to fit in the physical batch
  1697. if (slot.cmpl_type == SERVER_TASK_CMPL_TYPE_EMBEDDING || slot.cmpl_type == SERVER_TASK_CMPL_TYPE_RERANK) {
  1698. // cannot fit the prompt in the current batch - will try next iter
  1699. if (batch.n_tokens + slot.n_prompt_tokens > n_batch) {
  1700. continue;
  1701. }
  1702. }
1703. // check that we are in the right batch_type; if not, defer the slot
  1704. const bool slot_type =
  1705. slot.cmpl_type == SERVER_TASK_CMPL_TYPE_EMBEDDING ||
  1706. slot.cmpl_type == SERVER_TASK_CMPL_TYPE_RERANK ? 1 : 0;
  1707. if (batch_type == -1) {
  1708. batch_type = slot_type;
  1709. } else if (batch_type != slot_type) {
  1710. continue;
  1711. }
  1712. // keep only the common part
  1713. int p0 = slot.n_past;
  1714. if (!llama_kv_cache_seq_rm(ctx, slot.id + 1, p0, -1)) {
  1715. // could not partially delete (likely using a non-Transformer model)
  1716. llama_kv_cache_seq_rm(ctx, slot.id + 1, -1, -1);
  1717. p0 = 0;
  1718. // there is no common part left
  1719. slot.n_past = 0;
  1720. slot.n_past_se = 0;
  1721. slot.ga_i = 0;
  1722. common_sampler_reset(slot.smpl);
  1723. }
  1724. // remove the non-common part from the cache
  1725. slot.cache_tokens.resize(slot.n_past);
  1726. SLT_INF(slot, "kv cache rm [%d, end)\n", p0);
  1727. int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1728. int32_t ga_i = slot.ga_i;
  1729. int32_t ga_n = slot.ga_n;
  1730. int32_t ga_w = slot.ga_w;
  1731. // add prompt tokens for processing in the current batch
  1732. // TODO: the self-extend stuff here is a mess - simplify and/or abstract it somehow
  1733. for (; slot.n_past < slot.n_prompt_tokens && batch.n_tokens < n_batch; ++slot.n_past) {
  1734. if (slot.ga_n != 1) {
  1735. while (slot_npast >= ga_i + ga_w) {
  1736. const int bd = (ga_w/ga_n)*(ga_n - 1);
  1737. slot_npast -= bd;
  1738. ga_i += ga_w/ga_n;
  1739. }
  1740. }
  1741. common_batch_add(batch, prompt_tokens[slot.n_past], slot_npast, { slot.id + 1 }, false);
  1742. if (slot.params.cache_prompt) {
  1743. slot.cache_tokens.push_back(prompt_tokens[slot.n_past]);
  1744. }
  1745. slot.n_prompt_tokens_processed++;
  1746. slot_npast++;
  1747. }
  1748. SLT_INF(slot, "prompt processing progress, n_past = %d, n_tokens = %d, progress = %f\n", slot.n_past, batch.n_tokens, (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens);
  1749. // entire prompt has been processed
  1750. if (slot.n_past == slot.n_prompt_tokens) {
  1751. slot.state = SLOT_STATE_DONE_PROMPT;
  1752. GGML_ASSERT(batch.n_tokens > 0);
  1753. // extract the logits only for the last token
  1754. batch.logits[batch.n_tokens - 1] = true;
  1755. slot.n_decoded = 0;
  1756. slot.i_batch = batch.n_tokens - 1;
  1757. SLT_INF(slot, "prompt done, n_past = %d, n_tokens = %d\n", slot.n_past, batch.n_tokens);
  1758. }
  1759. }
  1760. if (batch.n_tokens >= n_batch) {
  1761. break;
  1762. }
  1763. }
  1764. }
  1765. if (batch.n_tokens == 0) {
  1766. SRV_WRN("%s", "no tokens to decode\n");
  1767. return;
  1768. }
  1769. SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens);
  1770. // make sure we're in the right embedding mode
  1771. llama_set_embeddings(ctx, batch_type == 1);
  1772. // process the created batch of tokens
  1773. for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
  1774. const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
  1775. for (auto & slot : slots) {
  1776. if (slot.ga_n != 1) {
  1777. // context extension via Self-Extend
  1778. // TODO: simplify and/or abstract this
  1779. while (slot.n_past_se >= slot.ga_i + slot.ga_w) {
  1780. const int ib = (slot.ga_n * slot.ga_i) / slot.ga_w;
  1781. const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1);
  1782. const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w;
  1783. SLT_DBG(slot, "shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd);
  1784. SLT_DBG(slot, "div: [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n);
  1785. SLT_DBG(slot, "shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd);
  1786. llama_kv_cache_seq_add(ctx, slot.id + 1, slot.ga_i, slot.n_past_se, ib * bd);
  1787. llama_kv_cache_seq_div(ctx, slot.id + 1, slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n);
  1788. llama_kv_cache_seq_add(ctx, slot.id + 1, slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd);
  1789. slot.n_past_se -= bd;
  1790. slot.ga_i += slot.ga_w / slot.ga_n;
  1791. SLT_DBG(slot, "\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i);
  1792. }
  1793. slot.n_past_se += n_tokens;
  1794. }
  1795. }
  1796. llama_batch batch_view = {
  1797. n_tokens,
  1798. batch.token + i,
  1799. nullptr,
  1800. batch.pos + i,
  1801. batch.n_seq_id + i,
  1802. batch.seq_id + i,
  1803. batch.logits + i,
  1804. 0, 0, 0, // unused
  1805. };
  1806. const int ret = llama_decode(ctx, batch_view);
  1807. metrics.on_decoded(slots);
  1808. if (ret != 0) {
  1809. if (n_batch == 1 || ret < 0) {
  1810. // if you get here, it means the KV cache is full - try increasing it via the context size
  1811. SRV_ERR("failed to decode the batch: KV cache is full - try increasing it via the context size, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret);
  1812. for (auto & slot : slots) {
  1813. slot.release();
  1814. send_error(slot, "Input prompt is too big compared to KV size. Please try increasing KV size.");
  1815. }
  1816. break; // break loop of n_batch
  1817. }
  1818. // retry with half the batch size to try to find a free slot in the KV cache
  1819. n_batch /= 2;
  1820. i -= n_batch;
  1821. SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size - try increasing it via the context size or enable defragmentation, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret);
  1822. continue; // continue loop of n_batch
  1823. }
  1824. for (auto & slot : slots) {
  1825. if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens)) {
  1826. continue; // continue loop of slots
  1827. }
  1828. if (slot.state == SLOT_STATE_DONE_PROMPT) {
  1829. if (slot.cmpl_type == SERVER_TASK_CMPL_TYPE_EMBEDDING) {
  1830. // prompt evaluated for embedding
  1831. send_embedding(slot, batch_view);
  1832. slot.release();
  1833. slot.i_batch = -1;
  1834. continue; // continue loop of slots
  1835. }
  1836. if (slot.cmpl_type == SERVER_TASK_CMPL_TYPE_RERANK) {
  1837. send_rerank(slot, batch_view);
  1838. slot.release();
  1839. slot.i_batch = -1;
  1840. continue; // continue loop of slots
  1841. }
  1842. // prompt evaluated for next-token prediction
  1843. slot.state = SLOT_STATE_GENERATING;
  1844. } else if (slot.state != SLOT_STATE_GENERATING) {
  1845. continue; // continue loop of slots
  1846. }
  1847. completion_token_output result;
  1848. const llama_token id = common_sampler_sample(slot.smpl, ctx, slot.i_batch - i);
  1849. common_sampler_accept(slot.smpl, id, true);
  1850. slot.n_decoded += 1;
  1851. if (slot.n_decoded == 1) {
  1852. slot.t_start_generation = ggml_time_us();
  1853. slot.t_prompt_processing = (slot.t_start_generation - slot.t_start_process_prompt) / 1e3;
  1854. metrics.on_prompt_eval(slot);
  1855. }
  1856. result.tok = id;
  1857. const auto * cur_p = common_sampler_get_candidates(slot.smpl);
  1858. for (size_t i = 0; i < (size_t) slot.sparams.n_probs; ++i) {
  1859. result.probs.push_back({
  1860. cur_p->data[i].id,
  1861. i >= cur_p->size ? 0.0f : cur_p->data[i].p,
  1862. });
  1863. }
  1864. if (!process_token(result, slot)) {
  1865. // release slot because of stop condition
  1866. slot.release();
  1867. slot.print_timings();
  1868. send_final_response(slot);
  1869. metrics.on_prediction(slot);
  1870. }
  1871. slot.i_batch = -1;
  1872. }
  1873. }
  1874. SRV_DBG("%s", "run slots completed\n");
  1875. }
  1876. json model_meta() const {
  1877. return json {
  1878. {"vocab_type", llama_vocab_type (model)},
  1879. {"n_vocab", llama_n_vocab (model)},
  1880. {"n_ctx_train", llama_n_ctx_train (model)},
  1881. {"n_embd", llama_n_embd (model)},
  1882. {"n_params", llama_model_n_params(model)},
  1883. {"size", llama_model_size (model)},
  1884. };
  1885. }
  1886. };
  1887. static void log_server_request(const httplib::Request & req, const httplib::Response & res) {
  1888. // skip GH copilot requests when using default port
  1889. if (req.path == "/v1/health" || req.path == "/v1/completions") {
  1890. return;
  1891. }
  1892. LOG_INF("request: %s %s %s %d\n", req.method.c_str(), req.path.c_str(), req.remote_addr.c_str(), res.status);
  1893. LOG_DBG("request: %s\n", req.body.c_str());
  1894. LOG_DBG("response: %s\n", res.body.c_str());
  1895. }
  1896. std::function<void(int)> shutdown_handler;
  1897. std::atomic_flag is_terminating = ATOMIC_FLAG_INIT;
  1898. inline void signal_handler(int signal) {
  1899. if (is_terminating.test_and_set()) {
  1900. // in case it hangs, we can force terminate the server by hitting Ctrl+C twice
1901. // this is for a better developer experience; we can remove it when the server is stable enough
  1902. fprintf(stderr, "Received second interrupt, terminating immediately.\n");
  1903. exit(1);
  1904. }
  1905. shutdown_handler(signal);
  1906. }
  1907. int main(int argc, char ** argv) {
  1908. // own arguments required by this example
  1909. common_params params;
  1910. if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SERVER)) {
  1911. return 1;
  1912. }
  1913. common_init();
  1914. // enabling this will output extra debug information in the HTTP responses from the server
  1915. // see format_final_response_oaicompat()
  1916. const bool verbose = params.verbosity > 9;
  1917. // struct that contains llama context and inference
  1918. server_context ctx_server;
  1919. if (params.model_alias == "unknown") {
  1920. params.model_alias = params.model;
  1921. }
  1922. llama_backend_init();
  1923. llama_numa_init(params.numa);
  1924. LOG_INF("system info: n_threads = %d, n_threads_batch = %d, total_threads = %d\n", params.cpuparams.n_threads, params.cpuparams_batch.n_threads, std::thread::hardware_concurrency());
  1925. LOG_INF("\n");
  1926. LOG_INF("%s\n", common_params_get_system_info(params).c_str());
  1927. LOG_INF("\n");
  1928. std::unique_ptr<httplib::Server> svr;
  1929. #ifdef CPPHTTPLIB_OPENSSL_SUPPORT
  1930. if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
  1931. LOG_INF("Running with SSL: key = %s, cert = %s\n", params.ssl_file_key.c_str(), params.ssl_file_cert.c_str());
  1932. svr.reset(
  1933. new httplib::SSLServer(params.ssl_file_cert.c_str(), params.ssl_file_key.c_str())
  1934. );
  1935. } else {
  1936. LOG_INF("Running without SSL\n");
  1937. svr.reset(new httplib::Server());
  1938. }
  1939. #else
  1940. if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
  1941. LOG_ERR("Server is built without SSL support\n");
  1942. return 1;
  1943. }
  1944. svr.reset(new httplib::Server());
  1945. #endif
  1946. std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};
  1947. svr->set_default_headers({{"Server", "llama.cpp"}});
  1948. // CORS preflight
  1949. svr->Options(R"(.*)", [](const httplib::Request &, httplib::Response & res) {
  1950. // Access-Control-Allow-Origin is already set by middleware
  1951. res.set_header("Access-Control-Allow-Credentials", "true");
  1952. res.set_header("Access-Control-Allow-Methods", "POST");
  1953. res.set_header("Access-Control-Allow-Headers", "*");
  1954. return res.set_content("", "text/html"); // blank response, no data
  1955. });
  1956. svr->set_logger(log_server_request);
  1957. auto res_error = [](httplib::Response & res, const json & error_data) {
  1958. json final_response {{"error", error_data}};
  1959. res.set_content(final_response.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
  1960. res.status = json_value(error_data, "code", 500);
  1961. };
  1962. auto res_ok = [](httplib::Response & res, const json & data) {
  1963. res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
  1964. res.status = 200;
  1965. };
  1966. svr->set_exception_handler([&res_error](const httplib::Request &, httplib::Response & res, std::exception_ptr ep) {
  1967. std::string message;
  1968. try {
  1969. std::rethrow_exception(ep);
  1970. } catch (std::exception & e) {
  1971. message = e.what();
  1972. } catch (...) {
  1973. message = "Unknown Exception";
  1974. }
  1975. json formatted_error = format_error_response(message, ERROR_TYPE_SERVER);
  1976. LOG_WRN("got exception: %s\n", formatted_error.dump().c_str());
  1977. res_error(res, formatted_error);
  1978. });
  1979. svr->set_error_handler([&res_error](const httplib::Request &, httplib::Response & res) {
  1980. if (res.status == 404) {
  1981. res_error(res, format_error_response("File Not Found", ERROR_TYPE_NOT_FOUND));
  1982. }
  1983. // for other error codes, we skip processing here because it's already done by res_error()
  1984. });
  1985. // set timeouts and change hostname and port
  1986. svr->set_read_timeout (params.timeout_read);
  1987. svr->set_write_timeout(params.timeout_write);
  1988. std::unordered_map<std::string, std::string> log_data;
  1989. log_data["hostname"] = params.hostname;
  1990. log_data["port"] = std::to_string(params.port);
  1991. if (params.api_keys.size() == 1) {
  1992. auto key = params.api_keys[0];
  1993. log_data["api_key"] = "api_key: ****" + key.substr(std::max((int)(key.length() - 4), 0));
  1994. } else if (params.api_keys.size() > 1) {
  1995. log_data["api_key"] = "api_key: " + std::to_string(params.api_keys.size()) + " keys loaded";
  1996. }
  1997. // Necessary similarity of prompt for slot selection
  1998. ctx_server.slot_prompt_similarity = params.slot_prompt_similarity;
  1999. //
  2000. // Middlewares
  2001. //
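// Rejects requests that do not carry a valid key in the
// "Authorization: Bearer <key>" header, unless no API keys are configured or
// the path is one of the public endpoints (/health, /models, /v1/models).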
  2002. auto middleware_validate_api_key = [&params, &res_error](const httplib::Request & req, httplib::Response & res) {
  2003. static const std::unordered_set<std::string> public_endpoints = {
  2004. "/health",
  2005. "/models",
  2006. "/v1/models",
  2007. };
  2008. // If API key is not set, skip validation
  2009. if (params.api_keys.empty()) {
  2010. return true;
  2011. }
  2012. // If path is public, skip validation
  2013. if (public_endpoints.find(req.path) != public_endpoints.end()) {
  2014. return true;
  2015. }
  2016. // Check for API key in the header
  2017. auto auth_header = req.get_header_value("Authorization");
  2018. std::string prefix = "Bearer ";
  2019. if (auth_header.substr(0, prefix.size()) == prefix) {
  2020. std::string received_api_key = auth_header.substr(prefix.size());
  2021. if (std::find(params.api_keys.begin(), params.api_keys.end(), received_api_key) != params.api_keys.end()) {
  2022. return true; // API key is valid
  2023. }
  2024. }
  2025. // API key is invalid or not provided
  2026. res_error(res, format_error_response("Invalid API Key", ERROR_TYPE_AUTHENTICATION));
  2027. LOG_WRN("Unauthorized: Invalid API Key\n");
  2028. return false;
  2029. };
  2030. auto middleware_server_state = [&res_error, &state](const httplib::Request & req, httplib::Response & res) {
  2031. server_state current_state = state.load();
  2032. if (current_state == SERVER_STATE_LOADING_MODEL) {
  2033. auto tmp = string_split(req.path, '.');
  2034. if (req.path == "/" || tmp.back() == "html") {
  2035. res.set_content(reinterpret_cast<const char*>(loading_html), loading_html_len, "text/html; charset=utf-8");
  2036. res.status = 503;
  2037. } else {
  2038. res_error(res, format_error_response("Loading model", ERROR_TYPE_UNAVAILABLE));
  2039. }
  2040. return false;
  2041. }
  2042. return true;
  2043. };
  2044. // register server middlewares
  2045. svr->set_pre_routing_handler([&middleware_validate_api_key, &middleware_server_state](const httplib::Request & req, httplib::Response & res) {
  2046. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2047. if (!middleware_server_state(req, res)) {
  2048. return httplib::Server::HandlerResponse::Handled;
  2049. }
  2050. if (!middleware_validate_api_key(req, res)) {
  2051. return httplib::Server::HandlerResponse::Handled;
  2052. }
  2053. return httplib::Server::HandlerResponse::Unhandled;
  2054. });
  2055. //
  2056. // Route handlers (or controllers)
  2057. //
  2058. const auto handle_health = [&](const httplib::Request &, httplib::Response & res) {
  2059. // error and loading states are handled by middleware
  2060. json health = {{"status", "ok"}};
  2061. res_ok(res, health);
  2062. };
  2063. const auto handle_slots = [&](const httplib::Request & req, httplib::Response & res) {
  2064. if (!params.endpoint_slots) {
  2065. res_error(res, format_error_response("This server does not support slots endpoint. Start it with `--slots`", ERROR_TYPE_NOT_SUPPORTED));
  2066. return;
  2067. }
  2068. // request slots data using task queue
  2069. server_task task;
  2070. task.id = ctx_server.queue_tasks.get_new_id();
  2071. task.type = SERVER_TASK_TYPE_METRICS;
  2072. ctx_server.queue_results.add_waiting_task_id(task.id);
  2073. ctx_server.queue_tasks.post(task, true); // high-priority task
  2074. // get the result
  2075. server_task_result result = ctx_server.queue_results.recv(task.id);
  2076. ctx_server.queue_results.remove_waiting_task_id(task.id);
  2077. // optionally return "fail_on_no_slot" error
  2078. const int n_idle_slots = result.data.at("idle");
  2079. if (req.has_param("fail_on_no_slot")) {
  2080. if (n_idle_slots == 0) {
  2081. res_error(res, format_error_response("no slot available", ERROR_TYPE_UNAVAILABLE));
  2082. return;
  2083. }
  2084. }
  2085. res_ok(res, result.data.at("slots"));
  2086. };
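// Converts the METRICS task result into the Prometheus text exposition format,
// e.g. (illustrative output):
//   # HELP llamacpp:prompt_tokens_total Number of prompt tokens processed.
//   # TYPE llamacpp:prompt_tokens_total counter
//   llamacpp:prompt_tokens_total 1024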
  2087. const auto handle_metrics = [&](const httplib::Request &, httplib::Response & res) {
  2088. if (!params.endpoint_metrics) {
  2089. res_error(res, format_error_response("This server does not support metrics endpoint. Start it with `--metrics`", ERROR_TYPE_NOT_SUPPORTED));
  2090. return;
  2091. }
  2092. // request slots data using task queue
  2093. server_task task;
  2094. task.id = ctx_server.queue_tasks.get_new_id();
  2095. task.id_target = -1;
  2096. task.type = SERVER_TASK_TYPE_METRICS;
  2097. task.data.push_back({{"reset_bucket", true}});
  2098. ctx_server.queue_results.add_waiting_task_id(task.id);
  2099. ctx_server.queue_tasks.post(task, true); // high-priority task
  2100. // get the result
  2101. server_task_result result = ctx_server.queue_results.recv(task.id);
  2102. ctx_server.queue_results.remove_waiting_task_id(task.id);
  2103. json data = result.data;
  2104. const uint64_t n_prompt_tokens_processed = data.at("n_prompt_tokens_processed");
  2105. const uint64_t t_prompt_processing = data.at("t_prompt_processing");
  2106. const uint64_t n_tokens_predicted = data.at("n_tokens_predicted");
  2107. const uint64_t t_tokens_generation = data.at("t_tokens_generation");
  2108. const uint64_t n_decode_total = data.at("n_decode_total");
  2109. const uint64_t n_busy_slots_total = data.at("n_busy_slots_total");
  2110. const int32_t kv_cache_used_cells = data.at("kv_cache_used_cells");
  2111. // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
  2112. json all_metrics_def = json {
  2113. {"counter", {{
  2114. {"name", "prompt_tokens_total"},
  2115. {"help", "Number of prompt tokens processed."},
  2116. {"value", (uint64_t) data.at("n_prompt_tokens_processed_total")}
  2117. }, {
  2118. {"name", "prompt_seconds_total"},
  2119. {"help", "Prompt process time"},
  2120. {"value", (uint64_t) data.at("t_prompt_processing_total") / 1.e3}
  2121. }, {
  2122. {"name", "tokens_predicted_total"},
  2123. {"help", "Number of generation tokens processed."},
  2124. {"value", (uint64_t) data.at("n_tokens_predicted_total")}
  2125. }, {
  2126. {"name", "tokens_predicted_seconds_total"},
  2127. {"help", "Predict process time"},
  2128. {"value", (uint64_t) data.at("t_tokens_generation_total") / 1.e3}
  2129. }, {
  2130. {"name", "n_decode_total"},
  2131. {"help", "Total number of llama_decode() calls"},
  2132. {"value", n_decode_total}
  2133. }, {
  2134. {"name", "n_busy_slots_per_decode"},
  2135. {"help", "Average number of busy slots per llama_decode() call"},
  2136. {"value", (float) n_busy_slots_total / (float) n_decode_total}
  2137. }}},
  2138. {"gauge", {{
  2139. {"name", "prompt_tokens_seconds"},
  2140. {"help", "Average prompt throughput in tokens/s."},
  2141. {"value", n_prompt_tokens_processed ? 1.e3 / t_prompt_processing * n_prompt_tokens_processed : 0.}
  2142. },{
  2143. {"name", "predicted_tokens_seconds"},
  2144. {"help", "Average generation throughput in tokens/s."},
  2145. {"value", n_tokens_predicted ? 1.e3 / t_tokens_generation * n_tokens_predicted : 0.}
  2146. },{
  2147. {"name", "kv_cache_usage_ratio"},
  2148. {"help", "KV-cache usage. 1 means 100 percent usage."},
  2149. {"value", 1. * kv_cache_used_cells / params.n_ctx}
  2150. },{
  2151. {"name", "kv_cache_tokens"},
  2152. {"help", "KV-cache tokens."},
  2153. {"value", (uint64_t) data.at("kv_cache_tokens_count")}
  2154. },{
  2155. {"name", "requests_processing"},
  2156. {"help", "Number of request processing."},
  2157. {"value", (uint64_t) data.at("processing")}
  2158. },{
  2159. {"name", "requests_deferred"},
  2160. {"help", "Number of request deferred."},
  2161. {"value", (uint64_t) data.at("deferred")}
  2162. }}}
  2163. };
  2164. std::stringstream prometheus;
  2165. for (const auto & el : all_metrics_def.items()) {
  2166. const auto & type = el.key();
  2167. const auto & metrics_def = el.value();
  2168. for (const auto & metric_def : metrics_def) {
  2169. const std::string name = metric_def.at("name");
  2170. const std::string help = metric_def.at("help");
  2171. auto value = json_value(metric_def, "value", 0.);
  2172. prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
  2173. << "# TYPE llamacpp:" << name << " " << type << "\n"
  2174. << "llamacpp:" << name << " " << value << "\n";
  2175. }
  2176. }
  2177. const int64_t t_start = data.at("t_start");
  2178. res.set_header("Process-Start-Time-Unix", std::to_string(t_start));
  2179. res.set_content(prometheus.str(), "text/plain; version=0.0.4");
  2180. res.status = 200; // HTTP OK
  2181. };
  2182. const auto handle_slots_save = [&ctx_server, &res_error, &res_ok, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
  2183. json request_data = json::parse(req.body);
  2184. std::string filename = request_data.at("filename");
  2185. if (!fs_validate_filename(filename)) {
  2186. res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
  2187. return;
  2188. }
  2189. std::string filepath = params.slot_save_path + filename;
  2190. server_task task;
  2191. task.type = SERVER_TASK_TYPE_SLOT_SAVE;
  2192. task.data = {
  2193. { "id_slot", id_slot },
  2194. { "filename", filename },
  2195. { "filepath", filepath },
  2196. };
  2197. const int id_task = ctx_server.queue_tasks.post(task);
  2198. ctx_server.queue_results.add_waiting_task_id(id_task);
  2199. server_task_result result = ctx_server.queue_results.recv(id_task);
  2200. ctx_server.queue_results.remove_waiting_task_id(id_task);
  2201. if (result.error) {
  2202. res_error(res, result.data);
  2203. } else {
  2204. res_ok(res, result.data);
  2205. }
  2206. };
  2207. const auto handle_slots_restore = [&ctx_server, &res_error, &res_ok, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
  2208. json request_data = json::parse(req.body);
  2209. std::string filename = request_data.at("filename");
  2210. if (!fs_validate_filename(filename)) {
  2211. res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
  2212. return;
  2213. }
  2214. std::string filepath = params.slot_save_path + filename;
  2215. server_task task;
  2216. task.type = SERVER_TASK_TYPE_SLOT_RESTORE;
  2217. task.data = {
  2218. { "id_slot", id_slot },
  2219. { "filename", filename },
  2220. { "filepath", filepath },
  2221. };
  2222. const int id_task = ctx_server.queue_tasks.post(task);
  2223. ctx_server.queue_results.add_waiting_task_id(id_task);
  2224. server_task_result result = ctx_server.queue_results.recv(id_task);
  2225. ctx_server.queue_results.remove_waiting_task_id(id_task);
  2226. if (result.error) {
  2227. res_error(res, result.data);
  2228. } else {
  2229. res_ok(res, result.data);
  2230. }
  2231. };
  2232. const auto handle_slots_erase = [&ctx_server, &res_error, &res_ok](const httplib::Request & /* req */, httplib::Response & res, int id_slot) {
  2233. server_task task;
  2234. task.type = SERVER_TASK_TYPE_SLOT_ERASE;
  2235. task.data = {
  2236. { "id_slot", id_slot },
  2237. };
  2238. const int id_task = ctx_server.queue_tasks.post(task);
  2239. ctx_server.queue_results.add_waiting_task_id(id_task);
  2240. server_task_result result = ctx_server.queue_results.recv(id_task);
  2241. ctx_server.queue_results.remove_waiting_task_id(id_task);
  2242. if (result.error) {
  2243. res_error(res, result.data);
  2244. } else {
  2245. res_ok(res, result.data);
  2246. }
  2247. };
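// Dispatches save/restore/erase for a single slot based on the "action" query
// parameter and the "id_slot" path parameter, e.g. (illustrative, assuming the
// route is registered with an {id_slot} path parameter):
//   POST /slots/0?action=save   with body {"filename": "slot0.bin"}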
  2248. const auto handle_slots_action = [&params, &res_error, &handle_slots_save, &handle_slots_restore, &handle_slots_erase](const httplib::Request & req, httplib::Response & res) {
  2249. if (params.slot_save_path.empty()) {
  2250. res_error(res, format_error_response("This server does not support slots action. Start it with `--slot-save-path`", ERROR_TYPE_NOT_SUPPORTED));
  2251. return;
  2252. }
  2253. std::string id_slot_str = req.path_params.at("id_slot");
  2254. int id_slot;
  2255. try {
  2256. id_slot = std::stoi(id_slot_str);
  2257. } catch (const std::exception &) {
  2258. res_error(res, format_error_response("Invalid slot ID", ERROR_TYPE_INVALID_REQUEST));
  2259. return;
  2260. }
  2261. std::string action = req.get_param_value("action");
  2262. if (action == "save") {
  2263. handle_slots_save(req, res, id_slot);
  2264. } else if (action == "restore") {
  2265. handle_slots_restore(req, res, id_slot);
  2266. } else if (action == "erase") {
  2267. handle_slots_erase(req, res, id_slot);
  2268. } else {
  2269. res_error(res, format_error_response("Invalid action", ERROR_TYPE_INVALID_REQUEST));
  2270. }
  2271. };
  2272. const auto handle_props = [&ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
  2273. json data = {
  2274. { "default_generation_settings", ctx_server.default_generation_settings_for_props },
  2275. { "total_slots", ctx_server.params.n_parallel },
  2276. { "chat_template", llama_get_chat_template(ctx_server.model) },
  2277. };
  2278. res_ok(res, data);
  2279. };
  2280. const auto handle_props_change = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
  2281. if (!ctx_server.params.endpoint_props) {
  2282. res_error(res, format_error_response("This server does not support changing global properties. Start it with `--props`", ERROR_TYPE_NOT_SUPPORTED));
  2283. return;
  2284. }
  2285. json data = json::parse(req.body);
  2286. // update any props here
  2287. res_ok(res, {{ "success", true }});
  2288. };
  2289. const auto handle_completions_generic = [&ctx_server, &res_error, &res_ok](server_task_cmpl_type cmpl_type, json & data, httplib::Response & res) {
  2290. if (ctx_server.params.embedding || ctx_server.params.reranking) {
  2291. res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings` or `--reranking`", ERROR_TYPE_NOT_SUPPORTED));
  2292. return;
  2293. }
  2294. std::vector<server_task> tasks = ctx_server.create_tasks_cmpl(data, cmpl_type);
  2295. ctx_server.queue_results.add_waiting_tasks(tasks);
  2296. ctx_server.queue_tasks.post(tasks);
  2297. bool stream = json_value(data, "stream", false);
  2298. const auto task_ids = server_task::get_list_id(tasks);
  2299. if (!stream) {
  2300. ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
  2301. if (results.size() == 1) {
  2302. // single result
  2303. res_ok(res, results[0].data);
  2304. } else {
  2305. // multiple results (multitask)
  2306. json arr = json::array();
  2307. for (const auto & res : results) {
  2308. arr.push_back(res.data);
  2309. }
  2310. res_ok(res, arr);
  2311. }
  2312. }, [&](const json & error_data) {
  2313. res_error(res, error_data);
  2314. });
  2315. ctx_server.queue_results.remove_waiting_task_ids(task_ids);
  2316. } else {
  2317. const auto chunked_content_provider = [task_ids, &ctx_server](size_t, httplib::DataSink & sink) {
  2318. ctx_server.receive_cmpl_results_stream(task_ids, [&](const server_task_result & result) -> bool {
  2319. return server_sent_event(sink, "data", result.data);
  2320. }, [&](const json & error_data) {
  2321. server_sent_event(sink, "error", error_data);
  2322. });
  2323. sink.done();
  2324. return false;
  2325. };
  2326. auto on_complete = [task_ids, &ctx_server] (bool) {
  2327. ctx_server.queue_results.remove_waiting_task_ids(task_ids);
  2328. };
  2329. res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
  2330. }
  2331. };
  2332. const auto handle_completions = [&handle_completions_generic](const httplib::Request & req, httplib::Response & res) {
  2333. json data = json::parse(req.body);
  2334. return handle_completions_generic(SERVER_TASK_CMPL_TYPE_NORMAL, data, res);
  2335. };
  2336. const auto handle_infill = [&ctx_server, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) {
  2337. std::string err;
  2338. if (llama_token_fim_pre(ctx_server.model) == LLAMA_TOKEN_NULL) {
  2339. err += "prefix token is missing. ";
  2340. }
  2341. if (llama_token_fim_suf(ctx_server.model) == LLAMA_TOKEN_NULL) {
  2342. err += "suffix token is missing. ";
  2343. }
  2344. if (llama_token_fim_mid(ctx_server.model) == LLAMA_TOKEN_NULL) {
  2345. err += "middle token is missing. ";
  2346. }
  2347. if (!err.empty()) {
  2348. res_error(res, format_error_response(string_format("Infill is not supported by this model: %s", err.c_str()), ERROR_TYPE_NOT_SUPPORTED));
  2349. return;
  2350. }
  2351. json data = json::parse(req.body);
  2352. return handle_completions_generic(SERVER_TASK_CMPL_TYPE_INFILL, data, res);
  2353. };
    // TODO: maybe merge this function with "handle_completions_generic"
    const auto handle_chat_completions = [&ctx_server, &params, &res_error, &res_ok, verbose](const httplib::Request & req, httplib::Response & res) {
        if (ctx_server.params.embedding || ctx_server.params.reranking) {
            res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings` or `--reranking`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template);

        std::vector<server_task> tasks = ctx_server.create_tasks_cmpl(data, SERVER_TASK_CMPL_TYPE_NORMAL);
        ctx_server.queue_results.add_waiting_tasks(tasks);
        ctx_server.queue_tasks.post(tasks);

        bool stream = json_value(data, "stream", false);
        const auto task_ids = server_task::get_list_id(tasks);
        const auto completion_id = gen_chatcmplid();

        if (!stream) {
            ctx_server.receive_cmpl_results(task_ids, [&](const std::vector<server_task_result> & results) {
                // multitask is never supported in chat completions, so there is only one result
                json result_oai = format_final_response_oaicompat(data, results[0].data, completion_id, /*.streaming =*/ false, verbose);
                res_ok(res, result_oai);
            }, [&](const json & error_data) {
                res_error(res, error_data);
            });

            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        } else {
            const auto chunked_content_provider = [task_ids, &ctx_server, completion_id](size_t, httplib::DataSink & sink) {
                ctx_server.receive_cmpl_results_stream(task_ids, [&](const server_task_result & result) -> bool {
                    std::vector<json> result_array = format_partial_response_oaicompat(result.data, completion_id);
                    for (auto & event_data : result_array) {
                        if (event_data.empty()) {
                            continue; // skip the stop token
                        }
                        if (!server_sent_event(sink, "data", event_data)) {
                            return false; // connection is closed
                        }
                    }
                    return true; // ok
                }, [&](const json & error_data) {
                    server_sent_event(sink, "error", error_data);
                });
                static const std::string ev_done = "data: [DONE]\n\n";
                sink.write(ev_done.data(), ev_done.size());
                sink.done();
                return true;
            };

            auto on_complete = [task_ids, &ctx_server] (bool) {
                ctx_server.queue_results.remove_waiting_task_ids(task_ids);
            };

            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    };
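    // Illustrative usage (not part of the original source): the handler above accepts OpenAI-style
    // chat requests; the message content shown is hypothetical.
    //
    //   curl -s http://localhost:8080/v1/chat/completions \
    //        -H "Content-Type: application/json" \
    //        -d '{"messages": [{"role": "user", "content": "Hello"}], "stream": true}'
    //
    // Streaming responses arrive as "data: {...}" chunks and end with the "data: [DONE]" sentinel
    // written via ev_done above.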
    const auto handle_models = [&params, &ctx_server](const httplib::Request &, httplib::Response & res) {
        json models = {
            {"object", "list"},
            {"data", {
                {
                    {"id",       params.model_alias},
                    {"object",   "model"},
                    {"created",  std::time(0)},
                    {"owned_by", "llamacpp"},
                    {"meta",     ctx_server.model_meta()}
                },
            }}
        };

        res.set_content(models.dump(), MIMETYPE_JSON);
    };
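    // Response shape produced by handle_models (the id and timestamp values are illustrative):
    //
    //   {"object": "list",
    //    "data": [{"id": "my-model-alias", "object": "model", "created": 1700000000,
    //              "owned_by": "llamacpp", "meta": {...}}]}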
    const auto handle_tokenize = [&ctx_server, &res_ok](const httplib::Request & req, httplib::Response & res) {
        const json body = json::parse(req.body);

        json tokens_response = json::array();
        if (body.count("content") != 0) {
            const bool add_special = json_value(body, "add_special", false);
            const bool with_pieces = json_value(body, "with_pieces", false);

            std::vector<llama_token> tokens = ctx_server.tokenize(body.at("content"), add_special, true);

            if (with_pieces) {
                for (const auto & token : tokens) {
                    std::string piece = common_token_to_piece(ctx_server.ctx, token);
                    json piece_json;

                    // Check if the piece is valid UTF-8
                    if (is_valid_utf8(piece)) {
                        piece_json = piece;
                    } else {
                        // If not valid UTF-8, store as array of byte values
                        piece_json = json::array();
                        for (unsigned char c : piece) {
                            piece_json.push_back(static_cast<int>(c));
                        }
                    }

                    tokens_response.push_back({
                        {"id",    token},
                        {"piece", piece_json}
                    });
                }
            } else {
                tokens_response = tokens;
            }
        }

        const json data = format_tokenizer_response(tokens_response);
        res_ok(res, data);
    };
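    // Illustrative usage (not part of the original source): token ids shown here are made up, and the
    // exact response wrapper comes from format_tokenizer_response.
    //
    //   request : {"content": "Hello world", "with_pieces": true}
    //   response: {"tokens": [{"id": 1234, "piece": "Hello"}, {"id": 5678, "piece": " world"}]}
    //
    // Without "with_pieces" the handler returns a plain array of token ids; pieces that are not valid
    // UTF-8 are encoded as arrays of byte values, as implemented above.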
    const auto handle_detokenize = [&ctx_server, &res_ok](const httplib::Request & req, httplib::Response & res) {
        const json body = json::parse(req.body);

        std::string content;
        if (body.count("tokens") != 0) {
            const std::vector<llama_token> tokens = body.at("tokens");
            content = tokens_to_str(ctx_server.ctx, tokens.cbegin(), tokens.cend());
        }

        const json data = format_detokenized_response(content);
        res_ok(res, data);
    };
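    // Illustrative usage (not part of the original source): the token ids are hypothetical and the
    // response wrapper comes from format_detokenized_response.
    //
    //   request : {"tokens": [1234, 5678]}
    //   response: a JSON object wrapping the decoded string, e.g. {"content": "Hello world"}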
    const auto handle_embeddings = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
        // TODO: somehow clean up these checks in the future
        if (!ctx_server.params.embedding || ctx_server.params.reranking) {
            res_error(res, format_error_response("This server does not support embeddings. Start it with `--embeddings` and without `--reranking`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        const json body = json::parse(req.body);
        bool is_openai = false;

        // an input prompt can be a string or a list of tokens (integer)
        json prompt;
        if (body.count("input") != 0) {
            is_openai = true;
            prompt = body.at("input");
        } else if (body.count("content") != 0) {
            // with "content", only a single prompt is supported
            prompt = std::vector<std::string>{body.at("content")};
        } else {
            res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        // create and queue the task
        json responses = json::array();
        bool error = false;
        {
            std::vector<server_task> tasks = ctx_server.create_tasks_cmpl({{"prompt", prompt}}, SERVER_TASK_CMPL_TYPE_EMBEDDING);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(tasks);

            // get the result
            std::unordered_set<int> task_ids = server_task::get_list_id(tasks);

            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
                for (const auto & res : results) {
                    responses.push_back(res.data);
                }
            }, [&](const json & error_data) {
                res_error(res, error_data);
                error = true;
            });

            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        }

        if (error) {
            return;
        }

        // write JSON response
        json root = is_openai
            ? format_embeddings_response_oaicompat(body, responses)
            : responses[0];
        res_ok(res, root);
    };
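    // Illustrative usage (not part of the original source): either field accepted by the checks above
    // can be used; the texts are hypothetical.
    //
    //   OpenAI style : {"input": ["first text", "second text"]}  -> wrapped by format_embeddings_response_oaicompat
    //   native style : {"content": "a single text"}              -> the raw data of the single resulting task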
    const auto handle_rerank = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
        if (!ctx_server.params.reranking) {
            res_error(res, format_error_response("This server does not support reranking. Start it with `--reranking`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        const json body = json::parse(req.body);

        // TODO: implement
        //int top_n = 1;
        //if (body.count("top_n") != 1) {
        //    top_n = body.at("top_n");
        //} else {
        //    res_error(res, format_error_response("\"top_n\" must be provided", ERROR_TYPE_INVALID_REQUEST));
        //    return;
        //}

        json query;
        if (body.count("query") == 1) {
            query = body.at("query");
            if (!query.is_string()) {
                res_error(res, format_error_response("\"query\" must be a string", ERROR_TYPE_INVALID_REQUEST));
                return;
            }
        } else {
            res_error(res, format_error_response("\"query\" must be provided", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        std::vector<std::string> documents = json_value(body, "documents", std::vector<std::string>());
        if (documents.empty()) {
            res_error(res, format_error_response("\"documents\" must be a non-empty string array", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        // construct prompt object: array of ["query", "doc0", "doc1", ...]
        json prompt;
        prompt.push_back(query);
        for (const auto & doc : documents) {
            prompt.push_back(doc);
        }

        LOG_DBG("rerank prompt: %s\n", prompt.dump().c_str());

        // create and queue the task
        json responses = json::array();
        bool error = false;
        {
            std::vector<server_task> tasks = ctx_server.create_tasks_cmpl({{"prompt", prompt}}, SERVER_TASK_CMPL_TYPE_RERANK);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(tasks);

            // get the result
            std::unordered_set<int> task_ids = server_task::get_list_id(tasks);

            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
                for (const auto & res : results) {
                    responses.push_back(res.data);
                }
            }, [&](const json & error_data) {
                res_error(res, error_data);
                error = true;
            });

            // clean up the waiting task ids, as done in handle_embeddings
            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        }

        if (error) {
            return;
        }

        // write JSON response
        json root = format_response_rerank(body, responses);
        res_ok(res, root);
    };
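    // Illustrative usage (not part of the original source): the query and documents are hypothetical.
    //
    //   curl -s http://localhost:8080/v1/rerank \
    //        -d '{"query": "what is a panda?", "documents": ["The giant panda is a bear native to China.", "Paris is the capital of France."]}'
    //
    // The query and every document are packed into a single prompt array, as constructed above, and the
    // scored results are assembled by format_response_rerank.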
    const auto handle_lora_adapters_list = [&](const httplib::Request &, httplib::Response & res) {
        json result = json::array();
        for (size_t i = 0; i < ctx_server.loras.size(); ++i) {
            auto & lora = ctx_server.loras[i];
            result.push_back({
                {"id",    i},
                {"path",  lora.path},
                {"scale", lora.scale},
            });
        }
        res_ok(res, result);
        res.status = 200; // HTTP OK
    };

    const auto handle_lora_adapters_apply = [&](const httplib::Request & req, httplib::Response & res) {
        const std::vector<json> body = json::parse(req.body);
        int max_idx = ctx_server.loras.size();

        // clear existing values
        for (auto & lora : ctx_server.loras) {
            lora.scale = 0.0f;
        }

        // set new values
        for (auto entry : body) {
            int   id    = entry.at("id");
            float scale = entry.at("scale");
            if (0 <= id && id < max_idx) {
                ctx_server.loras[id].scale = scale;
            } else {
                throw std::runtime_error("invalid adapter id");
            }
        }

        server_task task;
        task.type = SERVER_TASK_TYPE_SET_LORA;
        const int id_task = ctx_server.queue_tasks.post(task);
        ctx_server.queue_results.add_waiting_task_id(id_task);

        server_task_result result = ctx_server.queue_results.recv(id_task);
        ctx_server.queue_results.remove_waiting_task_id(id_task);

        res_ok(res, result.data);
        res.status = 200; // HTTP OK
    };
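    // Illustrative usage (not part of the original source): adapter ids, paths and scales are
    // hypothetical and must refer to adapters loaded at startup; an id outside [0, loras.size())
    // makes the handler above throw.
    //
    //   GET  /lora-adapters   -> [{"id": 0, "path": "lora-a.gguf", "scale": 0.0}, ...]
    //   POST /lora-adapters   with body '[{"id": 0, "scale": 0.5}]'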
    auto handle_static_file = [](unsigned char * content, size_t len, const char * mime_type) {
        return [content, len, mime_type](const httplib::Request &, httplib::Response & res) {
            res.set_content(reinterpret_cast<const char*>(content), len, mime_type);
            return false;
        };
    };

    //
    // Router
    //

    // register static assets routes
    if (!params.public_path.empty()) {
        // Set the base directory for serving static files
        svr->set_base_dir(params.public_path);
    }

    if (!params.api_keys.empty()) {
        // for now, if API key is set, web UI is unusable
        svr->Get("/", [&](const httplib::Request &, httplib::Response & res) {
            return res.set_content("Web UI is disabled because API key is set.", "text/html; charset=utf-8");
        });
    } else {
        // using embedded static files
        svr->Get("/",                           handle_static_file(index_html, index_html_len, "text/html; charset=utf-8"));
        svr->Get("/index.js",                   handle_static_file(index_js, index_js_len, "text/javascript; charset=utf-8"));
        svr->Get("/completion.js",              handle_static_file(completion_js, completion_js_len, "text/javascript; charset=utf-8"));
        svr->Get("/json-schema-to-grammar.mjs", handle_static_file(json_schema_to_grammar_mjs, json_schema_to_grammar_mjs_len, "text/javascript; charset=utf-8"));

        // add new-ui files
        svr->Get("/colorthemes.css",       handle_static_file(colorthemes_css, colorthemes_css_len, "text/css; charset=utf-8"));
        svr->Get("/style.css",             handle_static_file(style_css, style_css_len, "text/css; charset=utf-8"));
        svr->Get("/theme-beeninorder.css", handle_static_file(theme_beeninorder_css, theme_beeninorder_css_len, "text/css; charset=utf-8"));
        svr->Get("/theme-ketivah.css",     handle_static_file(theme_ketivah_css, theme_ketivah_css_len, "text/css; charset=utf-8"));
        svr->Get("/theme-mangotango.css",  handle_static_file(theme_mangotango_css, theme_mangotango_css_len, "text/css; charset=utf-8"));
        svr->Get("/theme-playground.css",  handle_static_file(theme_playground_css, theme_playground_css_len, "text/css; charset=utf-8"));
        svr->Get("/theme-polarnight.css",  handle_static_file(theme_polarnight_css, theme_polarnight_css_len, "text/css; charset=utf-8"));
        svr->Get("/theme-snowstorm.css",   handle_static_file(theme_snowstorm_css, theme_snowstorm_css_len, "text/css; charset=utf-8"));
        svr->Get("/index-new.html",        handle_static_file(index_new_html, index_new_html_len, "text/html; charset=utf-8"));
        svr->Get("/system-prompts.js",     handle_static_file(system_prompts_js, system_prompts_js_len, "text/javascript; charset=utf-8"));
        svr->Get("/prompt-formats.js",     handle_static_file(prompt_formats_js, prompt_formats_js_len, "text/javascript; charset=utf-8"));
    }

    // register API routes
    svr->Get ("/health",              handle_health); // public endpoint (no API key check)
    svr->Get ("/metrics",             handle_metrics);
    svr->Get ("/props",               handle_props);
    svr->Post("/props",               handle_props_change);
    svr->Get ("/models",              handle_models); // public endpoint (no API key check)
    svr->Get ("/v1/models",           handle_models); // public endpoint (no API key check)
    svr->Post("/completion",          handle_completions); // legacy
    svr->Post("/completions",         handle_completions);
    svr->Post("/v1/completions",      handle_completions);
    svr->Post("/chat/completions",    handle_chat_completions);
    svr->Post("/v1/chat/completions", handle_chat_completions);
    svr->Post("/infill",              handle_infill);
    svr->Post("/embedding",           handle_embeddings); // legacy
    svr->Post("/embeddings",          handle_embeddings);
    svr->Post("/v1/embeddings",       handle_embeddings);
    svr->Post("/rerank",              handle_rerank);
    svr->Post("/reranking",           handle_rerank);
    svr->Post("/v1/rerank",           handle_rerank);
    svr->Post("/v1/reranking",        handle_rerank);
    svr->Post("/tokenize",            handle_tokenize);
    svr->Post("/detokenize",          handle_detokenize);
    // LoRA adapters hotswap
    svr->Get ("/lora-adapters",       handle_lora_adapters_list);
    svr->Post("/lora-adapters",       handle_lora_adapters_apply);
    // Save & load slots
    svr->Get ("/slots",               handle_slots);
    svr->Post("/slots/:id_slot",      handle_slots_action);

    //
    // Start the server
    //
    if (params.n_threads_http < 1) {
        // +2 threads for monitoring endpoints
        params.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
    }
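    // Worked example (illustrative): with `--parallel 4` and 16 hardware threads, the pool size becomes
    // max(4 + 2, 16 - 1) = 15 HTTP threads; the +2 keeps the monitoring endpoints (/health, /metrics)
    // responsive while all generation slots are busy.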
    log_data["n_threads_http"] = std::to_string(params.n_threads_http);
    svr->new_task_queue = [&params] { return new httplib::ThreadPool(params.n_threads_http); };

    // clean up function, to be called before exit
    auto clean_up = [&svr]() {
        svr->stop();
        llama_backend_free();
    };

    // bind HTTP listen port, run the HTTP server in a thread
    if (!svr->bind_to_port(params.hostname, params.port)) {
        //LOG_ERROR("couldn't bind HTTP server socket", {
        //    {"hostname", params.hostname},
        //    {"port", params.port},
        //});
        LOG_ERR("%s: couldn't bind HTTP server socket, hostname: %s, port: %d\n", __func__, params.hostname.c_str(), params.port);
        clean_up();
        return 1;
    }
    std::thread t([&]() { svr->listen_after_bind(); });
    svr->wait_until_ready();

    LOG_INF("%s: HTTP server is listening, hostname: %s, port: %d, http threads: %d\n", __func__, params.hostname.c_str(), params.port, params.n_threads_http);

    // load the model
    LOG_INF("%s: loading model\n", __func__);

    if (!ctx_server.load_model(params)) {
        clean_up();
        t.join();
        LOG_ERR("%s: exiting due to model loading error\n", __func__);
        return 1;
    }

    ctx_server.init();

    state.store(SERVER_STATE_READY);

    LOG_INF("%s: model loaded\n", __func__);

    // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
    if (params.chat_template.empty()) {
        if (!ctx_server.validate_model_chat_template()) {
            LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
            params.chat_template = "chatml";
        }
    }

    // print sample chat example to make it clear which template is used
    LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), common_chat_format_example(ctx_server.model, params.chat_template).c_str());

    ctx_server.queue_tasks.on_new_task(std::bind(
        &server_context::process_single_task, &ctx_server, std::placeholders::_1));
    ctx_server.queue_tasks.on_update_slots(std::bind(
        &server_context::update_slots, &ctx_server));

    shutdown_handler = [&](int) {
        ctx_server.queue_tasks.terminate();
    };

    // install the signal handlers before entering the blocking main loop, so that
    // SIGINT/SIGTERM (or Ctrl+C on Windows) can trigger a graceful shutdown
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset (&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT,  &sigint_action, NULL);
    sigaction(SIGTERM, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

    LOG_INF("%s: server is listening on %s:%d - starting the main loop\n", __func__, params.hostname.c_str(), params.port);

    // this call blocks until the shutdown handler calls queue_tasks.terminate()
    ctx_server.queue_tasks.start_loop();

    clean_up();
    t.join();

    return 0;
}