server.cpp

#include "common.h"
#include "llama.h"
#include "grammar-parser.h"
#include "utils.hpp"
#include "oai.hpp"

#include "../llava/clip.h"

#include "stb_image.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif

// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576

#include "httplib.h"
#include "json.hpp"

// auto generated files (update with ./deps.sh)
#include "index.html.hpp"
#include "index.js.hpp"
#include "completion.js.hpp"
#include "json-schema-to-grammar.mjs.hpp"

#include <cstddef>
#include <thread>
#include <chrono>
#include <condition_variable>
#include <atomic>
#include <signal.h>

using json = nlohmann::json;
struct server_params
{
    std::string hostname = "127.0.0.1";
    std::vector<std::string> api_keys;
    std::string public_path = "examples/server/public";
    std::string chat_template = "chatml";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
    bool slots_endpoint = true;
};

bool server_verbose = false;
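
// length of the longest common prefix of two token sequences; used when
// deciding how much of a previously cached prompt can be reused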
static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
{
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
    {
    }
    return i;
}
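
// STOP_FULL: a stop word matched completely in the generated text
// STOP_PARTIAL: the text currently ends in a prefix of a stop word, so the
// ambiguous tail must be held back from streaming until the match resolves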
enum stop_type
{
    STOP_FULL,
    STOP_PARTIAL,
};

static bool ends_with(const std::string &str, const std::string &suffix)
{
    return str.size() >= suffix.size() &&
           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string &stop,
                                       const std::string &text)
{
    if (!text.empty() && !stop.empty())
    {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
        {
            if (stop[char_index] == text_last_char)
            {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial))
                {
                    return text.size() - char_index - 1;
                }
            }
        }
    }
    return std::string::npos;
}
// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
{
    std::string ret;
    for (; begin != end; ++begin)
    {
        ret += llama_token_to_piece(ctx, *begin);
    }
    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
{
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
    // if the size is 1 and the first bit is set, it's a partial character
    // (size > 1 means it's already a complete, known token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80)
    {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }
    return out;
}
// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
{
    json out = json::array();
    for (const auto &prob : probs)
    {
        json probs_for_token = json::array();
        for (const auto &p : prob.probs)
        {
            std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json
            {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }
        std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json{
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }
    return out;
}
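
// state of one parallel client "slot": its sampling configuration, cached
// tokens, generation flags and timing statistics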
struct llama_client_slot
{
    int id;
    int task_id = -1;

    struct slot_params params;

    slot_state state = IDLE;
    slot_command command = NONE;

    // used to determine the slot that has been used the longest
    int64_t t_last_used = -1;

    // generation props
    int32_t n_ctx       = 0;  // context size per slot
    int32_t n_past      = 0;
    int32_t n_decoded   = 0;
    int32_t n_remaining = -1;
    int32_t i_batch     = -1;
    int32_t n_predict   = -1;

    int32_t num_prompt_tokens           = 0;
    int32_t num_prompt_tokens_processed = 0;

    json prompt;
    std::string generated_text;
    llama_token sampled;
    std::vector<llama_token> cache_tokens;
    std::vector<completion_token_output> generated_token_probs;

    bool infill         = false;
    bool embedding      = false;
    bool has_next_token = true;
    bool truncated      = false;
    bool stopped_eos    = false;
    bool stopped_word   = false;
    bool stopped_limit  = false;

    bool oaicompat = false;
    std::string oaicompat_model;

    std::string stopping_word;

    // sampling
    struct llama_sampling_params sparams;
    llama_sampling_context *ctx_sampling = nullptr;

    int32_t ga_i = 0;   // group-attention state
    int32_t ga_n = 1;   // group-attention factor
    int32_t ga_w = 512; // group-attention width

    int32_t n_past_se = 0; // self-extend

    // multimodal
    std::vector<slot_image> images;

    // stats
    size_t sent_count = 0;
    size_t sent_token_probs_index = 0;

    int64_t t_start_process_prompt;
    int64_t t_start_genereration;

    double t_prompt_processing; // ms
    double t_token_generation;  // ms

    // multitasks
    int multitask_id = -1;

    void reset() {
        num_prompt_tokens      = 0;
        generated_text         = "";
        truncated              = false;
        stopped_eos            = false;
        stopped_word           = false;
        stopped_limit          = false;
        stopping_word          = "";
        n_past                 = 0;
        sent_count             = 0;
        sent_token_probs_index = 0;
        infill                 = false;
        ga_i                   = 0;
        n_past_se              = 0;

        generated_token_probs.clear();

        for (slot_image & img : images)
        {
            free(img.image_embedding);
            if (img.img_data) {
                clip_image_u8_free(img.img_data);
            }
            img.prefix_prompt = "";
        }

        images.clear();
    }

    bool has_budget(gpt_params &global_params) {
        if (params.n_predict == -1 && global_params.n_predict == -1)
        {
            return true; // limitless
        }

        n_remaining = -1;

        if (params.n_predict != -1)
        {
            n_remaining = params.n_predict - n_decoded;
        }
        else if (global_params.n_predict != -1)
        {
            n_remaining = global_params.n_predict - n_decoded;
        }

        return n_remaining > 0; // true while there is still budget left
    }

    bool available() const {
        return state == IDLE && command == NONE;
    }

    bool is_processing() const {
        return (state == IDLE && command == LOAD_PROMPT) || state == PROCESSING;
    }

    void add_token_string(const completion_token_output &token) {
        if (command == RELEASE)
        {
            return;
        }
        cache_tokens.push_back(token.tok);
        generated_token_probs.push_back(token);
    }

    void release() {
        if (state == PROCESSING)
        {
            t_token_generation = (ggml_time_us() - t_start_genereration) / 1e3;
            command = RELEASE;
        }
    }

    json get_formated_timings() {
        return json
        {
            {"prompt_n",               num_prompt_tokens_processed},
            {"prompt_ms",              t_prompt_processing},
            {"prompt_per_token_ms",    t_prompt_processing / num_prompt_tokens_processed},
            {"prompt_per_second",      1e3 / t_prompt_processing * num_prompt_tokens_processed},

            {"predicted_n",            n_decoded},
            {"predicted_ms",           t_token_generation},
            {"predicted_per_token_ms", t_token_generation / n_decoded},
            {"predicted_per_second",   1e3 / t_token_generation * n_decoded},
        };
    }

    void print_timings() const {
        LOG_TEE("\n");
        LOG_TEE("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, t_prompt_processing, num_prompt_tokens_processed, t_prompt_processing / num_prompt_tokens_processed, 1e3 / t_prompt_processing * num_prompt_tokens_processed);
        LOG_TEE("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, t_token_generation, n_decoded, t_token_generation / n_decoded, 1e3 / t_token_generation * n_decoded);
        LOG_TEE("%s:       total time = %10.2f ms\n", __func__, t_prompt_processing + t_token_generation);
    }
};
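
// global server state: the model and llama context, the shared decode batch,
// all client slots, and the task/result queues that connect the HTTP layer to
// the inference loop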
struct llama_server_context
{
    llama_model *model = nullptr;
    llama_context *ctx = nullptr;

    clip_ctx *clp_ctx = nullptr;

    gpt_params params;

    llama_batch batch;

    bool multimodal         = false;
    bool clean_kv_cache     = true;
    bool all_slots_are_idle = false;
    bool add_bos_token      = true;

    int32_t n_ctx; // total context for all clients / slots

    // system prompt
    bool system_need_update = false;

    std::string              system_prompt;
    std::vector<llama_token> system_tokens;

    std::string name_user; // this should be the antiprompt
    std::string name_assistant;

    // slots / clients
    std::vector<llama_client_slot> slots;

    json default_generation_settings_for_props;

    llama_server_queue    queue_tasks;
    llama_server_response queue_results;

    ~llama_server_context()
    {
        if (ctx)
        {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model)
        {
            llama_free_model(model);
            model = nullptr;
        }
    }

    bool load_model(const gpt_params &params_)
    {
        params = params_;
        if (!params.mmproj.empty()) {
            multimodal = true;
            LOG_TEE("Multi Modal Mode Enabled");
            clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
            if (clp_ctx == nullptr) {
                LOG_ERROR("unable to load clip model", {{"model", params.mmproj}});
                return false;
            }

            if (params.n_ctx < 2048) { // request larger context for the image embedding
                params.n_ctx = 2048;
            }
        }

        std::tie(model, ctx) = llama_init_from_gpt_params(params);
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params.model}});
            return false;
        }

        if (multimodal) {
            const int n_embd_clip = clip_n_mmproj_embd(clp_ctx);
            const int n_embd_llm  = llama_n_embd(model);
            if (n_embd_clip != n_embd_llm) {
                LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_embd_clip, n_embd_llm);
                llama_free(ctx);
                llama_free_model(model);
                return false;
            }
        }

        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_should_add_bos_token(model);

        return true;
    }
    void initialize() {
        // create slots
        all_slots_are_idle = true;

        const int32_t n_ctx_slot = n_ctx / params.n_parallel;

        LOG_TEE("Available slots:\n");
        for (int i = 0; i < params.n_parallel; i++)
        {
            llama_client_slot slot;

            slot.id        = i;
            slot.n_ctx     = n_ctx_slot;
            slot.n_predict = params.n_predict;

            LOG_TEE(" -> Slot %i - max context: %i\n", slot.id, n_ctx_slot);

            const int ga_n = params.grp_attn_n;
            const int ga_w = params.grp_attn_w;

            if (ga_n != 1) {
                GGML_ASSERT(ga_n > 0         && "ga_n must be positive");               // NOLINT
                GGML_ASSERT(ga_w % ga_n == 0 && "ga_w must be a multiple of ga_n");     // NOLINT
                //GGML_ASSERT(n_ctx_train % ga_w == 0     && "n_ctx_train must be a multiple of ga_w");    // NOLINT
                //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
                LOG_TEE(" -> Slot %i - self-extend: ga_n = %d, ga_w = %d\n", slot.id, ga_n, ga_w);
            }

            slot.ga_i = 0;
            slot.ga_n = ga_n;
            slot.ga_w = ga_w;

            slot.reset();

            slots.push_back(slot);
        }

        default_generation_settings_for_props = get_formated_generation(slots.front());
        default_generation_settings_for_props["seed"] = -1;

        batch = llama_batch_init(n_ctx, 0, params.n_parallel);
    }
    std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
    {
        // TODO: currently, we tokenize using special tokens by default
        //       this is not always correct (see https://github.com/ggerganov/llama.cpp/pull/4160#issuecomment-1824826216)
        //       but it's better compared to completely ignoring ChatML and other chat templates
        const bool TMP_FORCE_SPECIAL = true;

        // If `add_bos` is true, we only add BOS when json_prompt is a string,
        // or when the first element of the json_prompt array is a string.
        std::vector<llama_token> prompt_tokens;

        if (json_prompt.is_array())
        {
            bool first = true;
            for (const auto &p : json_prompt)
            {
                if (p.is_string())
                {
                    auto s = p.template get<std::string>();

                    std::vector<llama_token> toks;
                    if (first)
                    {
                        toks = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
                        first = false;
                    }
                    else
                    {
                        toks = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
                    }
                    prompt_tokens.insert(prompt_tokens.end(), toks.begin(), toks.end());
                }
                else
                {
                    if (first)
                    {
                        first = false;
                    }
                    prompt_tokens.push_back(p.template get<llama_token>());
                }
            }
        }
        else
        {
            auto s = json_prompt.template get<std::string>();
            prompt_tokens = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
        }

        return prompt_tokens;
    }
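
    // pick a slot for a new task: prefer the requested id if that slot is
    // free; otherwise (or when id < 0) fall back to the least-recently-used
    // idle slot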
    llama_client_slot* get_slot(int id) {
        int64_t t_last = ggml_time_us();
        llama_client_slot *last_used = nullptr;

        for (llama_client_slot & slot : slots)
        {
            if (slot.id == id && slot.available())
            {
                return &slot;
            }

            if (slot.available() && slot.t_last_used < t_last)
            {
                last_used = &slot;
                t_last = slot.t_last_used;
            }
        }

        return last_used;
    }
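
    // configure a slot from the JSON body of an incoming request: sampling
    // parameters, stop words, logit biases, penalty prompt and attached images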
    bool launch_slot_with_data(llama_client_slot* &slot, json data) {
        slot_params default_params;
        llama_sampling_params default_sparams;

        if (data.count("__oaicompat") != 0) {
            slot->oaicompat = true;
            slot->oaicompat_model = json_value(data, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
        } else {
            slot->oaicompat = false;
            slot->oaicompat_model = "";
        }

        slot->params.stream             = json_value(data, "stream",            false);
        slot->params.cache_prompt       = json_value(data, "cache_prompt",      false);
        slot->params.n_predict          = json_value(data, "n_predict",         default_params.n_predict);
        slot->sparams.top_k             = json_value(data, "top_k",             default_sparams.top_k);
        slot->sparams.top_p             = json_value(data, "top_p",             default_sparams.top_p);
        slot->sparams.min_p             = json_value(data, "min_p",             default_sparams.min_p);
        slot->sparams.tfs_z             = json_value(data, "tfs_z",             default_sparams.tfs_z);
        slot->sparams.typical_p         = json_value(data, "typical_p",         default_sparams.typical_p);
        slot->sparams.temp              = json_value(data, "temperature",       default_sparams.temp);
        slot->sparams.dynatemp_range    = json_value(data, "dynatemp_range",    default_sparams.dynatemp_range);
        slot->sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent);
        slot->sparams.penalty_last_n    = json_value(data, "repeat_last_n",     default_sparams.penalty_last_n);
        slot->sparams.penalty_repeat    = json_value(data, "repeat_penalty",    default_sparams.penalty_repeat);
        slot->sparams.penalty_freq      = json_value(data, "frequency_penalty", default_sparams.penalty_freq);
        slot->sparams.penalty_present   = json_value(data, "presence_penalty",  default_sparams.penalty_present);
        slot->sparams.mirostat          = json_value(data, "mirostat",          default_sparams.mirostat);
        slot->sparams.mirostat_tau      = json_value(data, "mirostat_tau",      default_sparams.mirostat_tau);
        slot->sparams.mirostat_eta      = json_value(data, "mirostat_eta",      default_sparams.mirostat_eta);
        slot->sparams.penalize_nl       = json_value(data, "penalize_nl",       default_sparams.penalize_nl);
        slot->params.n_keep             = json_value(data, "n_keep",            slot->params.n_keep);
        slot->params.seed               = json_value(data, "seed",              default_params.seed);
        slot->sparams.grammar           = json_value(data, "grammar",           default_sparams.grammar);
        slot->sparams.n_probs           = json_value(data, "n_probs",           default_sparams.n_probs);
        slot->sparams.min_keep          = json_value(data, "min_keep",          default_sparams.min_keep);

        if (slot->n_predict > 0 && slot->params.n_predict > slot->n_predict) {
            // Might be better to reject the request with a 400 ?
            LOG_WARNING("Max tokens to predict exceeds server configuration", {
                {"params.n_predict", slot->params.n_predict},
                {"slot.n_predict",   slot->n_predict},
            });
            slot->params.n_predict = slot->n_predict;
        }
        // infill
        if (data.count("input_prefix") != 0)
        {
            slot->params.input_prefix = data["input_prefix"];
        }
        else
        {
            slot->params.input_prefix = "";
        }

        if (data.count("input_suffix") != 0)
        {
            slot->params.input_suffix = data["input_suffix"];
        }
        else
        {
            slot->params.input_suffix = "";
        }

        if (data.count("prompt") != 0)
        {
            slot->prompt = data["prompt"];
        }
        else
        {
            slot->prompt = "";
        }
        slot->sparams.penalty_prompt_tokens.clear();
        slot->sparams.use_penalty_prompt_tokens = false;

        const auto &penalty_prompt = data.find("penalty_prompt");
        if (penalty_prompt != data.end())
        {
            if (penalty_prompt->is_string())
            {
                const auto penalty_prompt_string = penalty_prompt->get<std::string>();
                auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false);
                slot->sparams.penalty_prompt_tokens.swap(penalty_tokens);
                if (slot->params.n_predict > 0)
                {
                    slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict);
                }
                slot->sparams.use_penalty_prompt_tokens = true;
            }
            else if (penalty_prompt->is_array())
            {
                const auto n_tokens = penalty_prompt->size();
                slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict));

                const int n_vocab = llama_n_vocab(model);
                for (const auto &penalty_token : *penalty_prompt)
                {
                    if (penalty_token.is_number_integer())
                    {
                        const auto tok = penalty_token.get<llama_token>();
                        if (tok >= 0 && tok < n_vocab)
                        {
                            slot->sparams.penalty_prompt_tokens.push_back(tok);
                        }
                    }
                }
                slot->sparams.use_penalty_prompt_tokens = true;
            }
        }
        slot->sparams.logit_bias.clear();

        if (json_value(data, "ignore_eos", false))
        {
            slot->sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
        }

        const auto &logit_bias = data.find("logit_bias");
        if (logit_bias != data.end() && logit_bias->is_array())
        {
            const int n_vocab = llama_n_vocab(model);
            for (const auto &el : *logit_bias)
            {
                if (el.is_array() && el.size() == 2)
                {
                    float bias;
                    if (el[1].is_number())
                    {
                        bias = el[1].get<float>();
                    }
                    else if (el[1].is_boolean() && !el[1].get<bool>())
                    {
                        bias = -INFINITY;
                    }
                    else
                    {
                        continue;
                    }

                    if (el[0].is_number_integer())
                    {
                        llama_token tok = el[0].get<llama_token>();
                        if (tok >= 0 && tok < n_vocab)
                        {
                            slot->sparams.logit_bias[tok] = bias;
                        }
                    }
                    else if (el[0].is_string())
                    {
                        auto toks = llama_tokenize(model, el[0].get<std::string>(), false);
                        for (auto tok : toks)
                        {
                            slot->sparams.logit_bias[tok] = bias;
                        }
                    }
                }
            }
        }
        slot->params.antiprompt.clear();

        const auto &stop = data.find("stop");
        if (stop != data.end() && stop->is_array())
        {
            for (const auto &word : *stop)
            {
                if (!word.empty())
                {
                    slot->params.antiprompt.push_back(word);
                }
            }
        }

        const auto &samplers_sequence = data.find("samplers");
        if (samplers_sequence != data.end() && samplers_sequence->is_array())
        {
            std::vector<std::string> sampler_names;
            for (const auto &sampler_name : *samplers_sequence)
            {
                if (sampler_name.is_string())
                {
                    sampler_names.emplace_back(sampler_name);
                }
            }
            slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
        }
        else
        {
            slot->sparams.samplers_sequence = default_sparams.samplers_sequence;
        }
        if (multimodal)
        {
            const auto &images_data = data.find("image_data");
            if (images_data != data.end() && images_data->is_array())
            {
                for (const auto &img : *images_data)
                {
                    const std::vector<uint8_t> image_buffer = base64_decode(img["data"].get<std::string>());

                    slot_image img_sl;
                    img_sl.id = img.count("id") != 0 ? img["id"].get<int>() : slot->images.size();
                    img_sl.img_data = clip_image_u8_init();
                    if (!clip_image_load_from_bytes(image_buffer.data(), image_buffer.size(), img_sl.img_data))
                    {
                        LOG_TEE("slot %i - failed to load image [id: %i]\n", slot->id, img_sl.id);
                        return false;
                    }
                    LOG_TEE("slot %i - loaded image\n", slot->id);
                    img_sl.request_encode_image = true;
                    slot->images.push_back(img_sl);
                }

                // process prompt
                // example: system prompt [img-102] user [img-103] describe [img-134]
                //   -> [{id: 102, prefix: 'system prompt '}, {id: 103, prefix: ' user '}, {id: 134, prefix: ' describe '}]
                if (slot->images.size() > 0 && !slot->prompt.is_array())
                {
                    std::string prompt = slot->prompt.get<std::string>();
                    size_t pos = 0, begin_prefix = 0;
                    std::string pattern = "[img-";
                    while ((pos = prompt.find(pattern, pos)) != std::string::npos) {
                        size_t end_prefix = pos;
                        pos += pattern.length();
                        size_t end_pos = prompt.find(']', pos);
                        if (end_pos != std::string::npos)
                        {
                            std::string image_id = prompt.substr(pos, end_pos - pos);
                            try
                            {
                                int img_id = std::stoi(image_id);
                                bool found = false;
                                for (slot_image &img : slot->images)
                                {
                                    if (img.id == img_id) {
                                        found = true;
                                        img.prefix_prompt = prompt.substr(begin_prefix, end_prefix - begin_prefix);
                                        begin_prefix = end_pos + 1;
                                        break;
                                    }
                                }
                                if (!found) {
                                    LOG_TEE("ERROR: Image with id: %i, not found.\n", img_id);
                                    slot->images.clear();
                                    return false;
                                }
                            } catch (const std::invalid_argument& e) {
                                LOG_TEE("Invalid image number id in prompt\n");
                                slot->images.clear();
                                return false;
                            }
                        }
                    }
                    slot->prompt = "";
                    slot->params.input_suffix = prompt.substr(begin_prefix);
                    slot->params.cache_prompt = false; // multimodal doesn't support prompt caching
                }
            }
        }
        if (slot->ctx_sampling != nullptr)
        {
            llama_sampling_free(slot->ctx_sampling);
        }
        slot->ctx_sampling = llama_sampling_init(slot->sparams);
        llama_set_rng_seed(ctx, slot->params.seed);
        slot->command = LOAD_PROMPT;

        all_slots_are_idle = false;

        LOG_TEE("slot %i is processing [task id: %i]\n", slot->id, slot->task_id);

        return true;
    }
    void kv_cache_clear() {
        // clear the entire KV cache
        llama_kv_cache_clear(ctx);
        clean_kv_cache = false;
    }

    void update_system_prompt() {
        kv_cache_clear();
        system_tokens.clear();

        if (!system_prompt.empty()) {
            system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);

            llama_batch_clear(batch);

            for (int i = 0; i < (int)system_tokens.size(); ++i)
            {
                llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
            }

            if (llama_decode(ctx, batch) != 0)
            {
                LOG_TEE("%s: llama_decode() failed\n", __func__);
                return;
            }

            // assign the system KV cache to all parallel sequences
            for (int32_t i = 1; i < params.n_parallel; ++i)
            {
                llama_kv_cache_seq_cp(ctx, 0, i, 0, system_tokens.size());
            }
        }

        LOG_TEE("system prompt updated\n");
        system_need_update = false;
    }

    void notify_system_prompt_changed() {
        // release all slots
        for (llama_client_slot &slot : slots)
        {
            slot.release();
        }

        system_need_update = true;
    }

    void process_system_prompt_data(const json &sys_props) {
        system_prompt  = sys_props.value("prompt", "");
        name_user      = sys_props.value("anti_prompt", "");
        name_assistant = sys_props.value("assistant_name", "");

        notify_system_prompt_changed();
    }
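
    // scan newly generated text for the configured stop words; STOP_FULL marks
    // the slot as stopped, while STOP_PARTIAL only reports a possible match
    // position so streaming can hold back the ambiguous tail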
    static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
                                        const stop_type type, llama_client_slot &slot)
    {
        size_t stop_pos = std::string::npos;

        for (const std::string &word : slot.params.antiprompt)
        {
            size_t pos;
            if (type == STOP_FULL)
            {
                const size_t tmp      = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;

                pos = text.find(word, from_pos);
            }
            else
            {
                pos = find_partial_stop_string(word, text);
            }

            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos))
            {
                if (type == STOP_FULL)
                {
                    slot.stopped_word   = true;
                    slot.stopping_word  = word;
                    slot.has_next_token = false;
                }
                stop_pos = pos;
            }
        }

        return stop_pos;
    }
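
    // incorporate one sampled token into a slot: append its text, wait on
    // incomplete UTF-8 sequences, trim stop words, stream partial responses,
    // and enforce the prediction budget; returns false when generation stops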
    bool process_token(completion_token_output &result, llama_client_slot &slot) {
        // remember which tokens were sampled - used for repetition penalties during sampling
        const std::string token_str = llama_token_to_piece(ctx, result.tok);
        slot.sampled = result.tok;

        // search stop word and delete it
        slot.generated_text += token_str;
        slot.has_next_token = true;

        if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1)
        {
            // we can change penalty_prompt_tokens because it is always created from scratch each request
            slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
        }

        // check if there is an incomplete UTF-8 character at the end
        bool incomplete = false;
        for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i)
        {
            unsigned char c = slot.generated_text[slot.generated_text.size() - i];
            if ((c & 0xC0) == 0x80)
            {
                // continuation byte: 10xxxxxx
                continue;
            }
            if ((c & 0xE0) == 0xC0)
            {
                // 2-byte character: 110xxxxx ...
                incomplete = i < 2;
            }
            else if ((c & 0xF0) == 0xE0)
            {
                // 3-byte character: 1110xxxx ...
                incomplete = i < 3;
            }
            else if ((c & 0xF8) == 0xF0)
            {
                // 4-byte character: 11110xxx ...
                incomplete = i < 4;
            }
            // else 1-byte character or invalid byte
            break;
        }

        if (!incomplete)
        {
            size_t pos = std::min(slot.sent_count, slot.generated_text.size());
            const std::string str_test = slot.generated_text.substr(pos);
            bool is_stop_full = false;
            size_t stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_FULL, slot);
            if (stop_pos != std::string::npos)
            {
                is_stop_full = true;
                slot.generated_text.erase(
                    slot.generated_text.begin() + pos + stop_pos,
                    slot.generated_text.end());
                pos = std::min(slot.sent_count, slot.generated_text.size());
            }
            else
            {
                is_stop_full = false;
                stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot);
            }

            // check if there is any token to predict
            if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0))
            {
                // do not send the stop word in the response
                result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
                slot.sent_count += result.text_to_send.size();
                // add the token to slot queue and cache
            }
            slot.add_token_string(result);
            if (slot.params.stream)
            {
                send_partial_response(slot, result);
            }
        }

        if (incomplete)
        {
            slot.has_next_token = true;
        }

        // check the limits
        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params))
        {
            slot.stopped_limit = true;
            slot.has_next_token = false;
        }

        if (!slot.cache_tokens.empty() && result.tok == llama_token_eos(model))
        {
            slot.stopped_eos = true;
            slot.has_next_token = false;
            LOG_VERBOSE("eos token found", {});
        }

        LOG_VERBOSE("next token", {
            {"token",                result.tok},
            {"token_text",           tokens_to_output_formatted_string(ctx, result.tok)},
            {"has_next_token",       slot.has_next_token},
            {"n_remain",             slot.n_remaining},
            {"num_tokens_predicted", slot.n_decoded},
            {"stopped_eos",          slot.stopped_eos},
            {"stopped_word",         slot.stopped_word},
            {"stopped_limit",        slot.stopped_limit},
            {"stopping_word",        slot.stopping_word},
        });

        return slot.has_next_token; // continue
    }
    bool process_images(llama_client_slot &slot) const
    {
        for (slot_image &img : slot.images)
        {
            if (!img.request_encode_image)
            {
                continue;
            }

            clip_image_f32_batch img_res_v;
            img_res_v.size = 0;
            img_res_v.data = nullptr;
            if (!clip_image_preprocess(clp_ctx, img.img_data, img_res_v))
            {
                LOG_TEE("Error processing the given image");
                clip_free(clp_ctx);
                clip_image_f32_batch_free(img_res_v);
                return false;
            }
            if (img_res_v.size == 0)
            {
                LOG_TEE("Error processing the given image");
                return false;
            }

            // note: assumes only one image was returned by clip_image_preprocess
            clip_image_f32 * img_res = img_res_v.data;

            img.image_tokens = clip_n_patches(clp_ctx);
            img.image_embedding = (float *)malloc(clip_embd_nbytes(clp_ctx));
            if (!img.image_embedding)
            {
                LOG_TEE("Unable to allocate memory for image embeddings\n");
                clip_image_f32_batch_free(img_res_v);
                clip_free(clp_ctx);
                return false;
            }

            LOG_TEE("slot %i - encoding image [id: %i]\n", slot.id, img.id);
            if (!clip_image_encode(clp_ctx, params.n_threads, img_res, img.image_embedding))
            {
                LOG_TEE("Unable to encode image\n");
                clip_image_f32_batch_free(img_res_v);
                return false;
            }

            clip_image_f32_batch_free(img_res_v);

            img.request_encode_image = false;
        }

        return slot.images.size() > 0;
    }
    void send_error(task_server& task, const std::string &error)
    {
        LOG_TEE("task %i - error: %s\n", task.id, error.c_str());
        task_result res;
        res.id           = task.id;
        res.multitask_id = task.multitask_id;
        res.stop         = false;
        res.error        = true;
        res.result_json  = { { "content", error } };
        queue_results.send(res);
    }
    json get_formated_generation(llama_client_slot &slot)
    {
        const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
        const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() &&
                                eos_bias->second < 0.0f && std::isinf(eos_bias->second);
        std::vector<std::string> samplers_sequence;
        for (const auto &sampler_type : slot.sparams.samplers_sequence)
        {
            samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type));
        }

        return json {
            {"n_ctx",                     slot.n_ctx},
            {"n_predict",                 slot.n_predict},
            {"model",                     params.model_alias},
            {"seed",                      slot.params.seed},
            {"temperature",               slot.sparams.temp},
            {"dynatemp_range",            slot.sparams.dynatemp_range},
            {"dynatemp_exponent",         slot.sparams.dynatemp_exponent},
            {"top_k",                     slot.sparams.top_k},
            {"top_p",                     slot.sparams.top_p},
            {"min_p",                     slot.sparams.min_p},
            {"tfs_z",                     slot.sparams.tfs_z},
            {"typical_p",                 slot.sparams.typical_p},
            {"repeat_last_n",             slot.sparams.penalty_last_n},
            {"repeat_penalty",            slot.sparams.penalty_repeat},
            {"presence_penalty",          slot.sparams.penalty_present},
            {"frequency_penalty",         slot.sparams.penalty_freq},
            {"penalty_prompt_tokens",     slot.sparams.penalty_prompt_tokens},
            {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
            {"mirostat",                  slot.sparams.mirostat},
            {"mirostat_tau",              slot.sparams.mirostat_tau},
            {"mirostat_eta",              slot.sparams.mirostat_eta},
            {"penalize_nl",               slot.sparams.penalize_nl},
            {"stop",                      slot.params.antiprompt},
            {"n_predict",                 slot.params.n_predict},
            {"n_keep",                    params.n_keep},
            {"ignore_eos",                ignore_eos},
            {"stream",                    slot.params.stream},
            {"logit_bias",                slot.sparams.logit_bias},
            {"n_probs",                   slot.sparams.n_probs},
            {"min_keep",                  slot.sparams.min_keep},
            {"grammar",                   slot.sparams.grammar},
            {"samplers",                  samplers_sequence}
        };
    }
    void send_partial_response(llama_client_slot &slot, completion_token_output tkn)
    {
        task_result res;
        res.id           = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error        = false;
        res.stop         = false;

        res.result_json = json
        {
            {"content",    tkn.text_to_send},
            {"stop",       false},
            {"slot_id",    slot.id},
            {"multimodal", multimodal}
        };

        if (slot.sparams.n_probs > 0)
        {
            std::vector<completion_token_output> probs_output = {};
            const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
            size_t probs_pos      = std::min(slot.sent_token_probs_index,                       slot.generated_token_probs.size());
            size_t probs_stop_pos = std::min(slot.sent_token_probs_index + to_send_toks.size(), slot.generated_token_probs.size());
            if (probs_pos < probs_stop_pos)
            {
                probs_output = std::vector<completion_token_output>(slot.generated_token_probs.begin() + probs_pos, slot.generated_token_probs.begin() + probs_stop_pos);
            }
            slot.sent_token_probs_index = probs_stop_pos;
            res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
        }

        if (slot.oaicompat)
        {
            res.result_json["oaicompat_token_ctr"] = slot.n_decoded;
            res.result_json["model"] = slot.oaicompat_model;
        }

        queue_results.send(res);
    }
    void send_final_response(llama_client_slot &slot)
    {
        task_result res;
        res.id           = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error        = false;
        res.stop         = true;

        res.result_json = json
        {
            {"content",             !slot.params.stream ? slot.generated_text : ""},
            {"slot_id",             slot.id},
            {"stop",                true},
            {"model",               params.model_alias},
            {"tokens_predicted",    slot.n_decoded},
            {"tokens_evaluated",    slot.num_prompt_tokens},
            {"generation_settings", get_formated_generation(slot)},
            {"prompt",              slot.prompt},
            {"truncated",           slot.truncated},
            {"stopped_eos",         slot.stopped_eos},
            {"stopped_word",        slot.stopped_word},
            {"stopped_limit",       slot.stopped_limit},
            {"stopping_word",       slot.stopping_word},
            {"tokens_cached",       slot.n_past},
            {"timings",             slot.get_formated_timings()}
        };

        if (slot.sparams.n_probs > 0)
        {
            std::vector<completion_token_output> probs = {};
            if (!slot.params.stream && slot.stopped_word)
            {
                const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
                probs = std::vector<completion_token_output>(slot.generated_token_probs.begin(), slot.generated_token_probs.end() - stop_word_toks.size());
            }
            else
            {
                probs = std::vector<completion_token_output>(
                    slot.generated_token_probs.begin(),
                    slot.generated_token_probs.end());
            }
            res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
        }

        if (slot.oaicompat)
        {
            res.result_json["oaicompat_token_ctr"] = slot.n_decoded;
            res.result_json["model"] = slot.oaicompat_model;
        }

        queue_results.send(res);
    }
    void send_embedding(llama_client_slot &slot)
    {
        task_result res;
        res.id           = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error        = false;
        res.stop         = true;

        const int n_embd = llama_n_embd(model);
        if (!params.embedding)
        {
            LOG_WARNING("embedding disabled", {
                {"params.embedding", params.embedding},
            });
            res.result_json = json
            {
                {"embedding", std::vector<float>(n_embd, 0.0f)},
            };
        }
        else
        {
            const float *data = llama_get_embeddings(ctx);
            std::vector<float> embedding(data, data + n_embd);
            res.result_json = json
            {
                {"embedding", embedding},
            };
        }
        queue_results.send(res);
    }
    void request_completion(int task_id, json data, bool infill, bool embedding, int multitask_id)
    {
        task_server task;
        task.id             = task_id;
        task.target_id      = 0;
        task.data           = std::move(data);
        task.infill_mode    = infill;
        task.embedding_mode = embedding;
        task.type           = TASK_TYPE_COMPLETION;
        task.multitask_id   = multitask_id;

        // when a completion task's prompt array is not a singleton, we split it into multiple requests
        // otherwise, it's a single-prompt task, and we actually queue it
        // if there are numbers in the prompt array, it will be treated as an array of tokens
        if (task.data.count("prompt") != 0 && task.data.at("prompt").size() > 1) {
            bool numbers = false;
            for (const auto& e : task.data.at("prompt")) {
                if (e.is_number()) {
                    numbers = true;
                    break;
                }
            }

            // NOTE: split_multiprompt_task() does not handle a mix of strings and numbers,
            //       it will completely stall the server. I don't know where the bug for this is.
            //
            //       if there are numbers, it needs to be treated like a single prompt,
            //       queue_tasks handles a mix of strings and numbers just fine.
            if (numbers) {
                queue_tasks.post(task);
            } else {
                split_multiprompt_task(task_id, task);
            }
        } else {
            queue_tasks.post(task);
        }
    }
    // for multiple images processing
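    // for each image: decode its tokenized prefix prompt in n_batch chunks,
    // then feed the precomputed image embedding directly into the model, and
    // finally queue the text that follows it (the next prefix or the suffix)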
    bool ingest_images(llama_client_slot &slot, int n_batch)
    {
        int image_idx = 0;

        while (image_idx < (int) slot.images.size())
        {
            slot_image &img = slot.images[image_idx];

            // process prefix prompt
            for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
            {
                const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
                llama_batch batch_view = {
                    n_tokens,
                    batch.token    + i,
                    nullptr,
                    batch.pos      + i,
                    batch.n_seq_id + i,
                    batch.seq_id   + i,
                    batch.logits   + i,
                    0, 0, 0, // unused
                };
                if (llama_decode(ctx, batch_view))
                {
                    LOG_TEE("%s : failed to eval\n", __func__);
                    return false;
                }
            }

            // process image with llm
            for (int i = 0; i < img.image_tokens; i += n_batch)
            {
                int n_eval = img.image_tokens - i;
                if (n_eval > n_batch)
                {
                    n_eval = n_batch;
                }

                const int n_embd = llama_n_embd(model);
                llama_batch batch_img = { n_eval, nullptr, (img.image_embedding + i * n_embd), nullptr, nullptr, nullptr, nullptr, slot.n_past, 1, 0, };
                if (llama_decode(ctx, batch_img))
                {
                    LOG_TEE("%s : failed to eval image\n", __func__);
                    return false;
                }
                slot.n_past += n_eval;
            }
            image_idx++;

            llama_batch_clear(batch);

            // append prefix of next image
            const auto json_prompt = (image_idx >= (int) slot.images.size()) ?
                slot.params.input_suffix : // no more images, then process suffix prompt
                (json)(slot.images[image_idx].prefix_prompt);

            std::vector<llama_token> append_tokens = tokenize(json_prompt, false); // has next image
            for (int i = 0; i < (int) append_tokens.size(); ++i)
            {
                llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, { slot.id }, true);
                slot.n_past += 1;
            }
        }

        return true;
    }
    void request_cancel(int task_id)
    {
        task_server task;
        task.type = TASK_TYPE_CANCEL;
        task.target_id = task_id;
        queue_tasks.post(task);
    }
    void split_multiprompt_task(int multitask_id, task_server& multiprompt_task)
    {
        int prompt_count = multiprompt_task.data.at("prompt").size();
        if (prompt_count <= 1) {
            send_error(multiprompt_task, "error while handling multiple prompts");
            return;
        }

        // generate all the IDs for the subtasks
        std::vector<int> subtask_ids(prompt_count);
        for (int i = 0; i < prompt_count; i++)
        {
            subtask_ids[i] = queue_tasks.get_new_id();
        }

        // queue up the multitask so we can track its subtask progression
        queue_tasks.add_multitask(multitask_id, subtask_ids);

        // add subtasks
        for (int i = 0; i < prompt_count; i++)
        {
            json subtask_data = multiprompt_task.data;
            subtask_data["prompt"] = subtask_data["prompt"][i];

            // subtasks inherit everything else (infill mode, embedding mode, etc.)
            request_completion(subtask_ids[i], subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode, multitask_id);
        }
    }
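
    // handle one task popped off queue_tasks: route completions to a free slot
    // (deferring the task when none is available) and process cancellations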
    void process_single_task(task_server& task)
    {
        switch (task.type)
        {
            case TASK_TYPE_COMPLETION: {
                llama_client_slot *slot = get_slot(json_value(task.data, "slot_id", -1));
                if (slot == nullptr)
                {
                    // if no slot is available, we defer this task for processing later
                    LOG_VERBOSE("no slot is available", {});
                    queue_tasks.defer(task);
                    break;
                }

                if (task.data.contains("system_prompt"))
                {
                    if (!all_slots_are_idle) {
                        send_error(task, "system prompt can only be updated when all slots are idle");
                        break;
                    }
                    process_system_prompt_data(task.data["system_prompt"]);

                    // reset cache_tokens for all slots
                    for (llama_client_slot &slot : slots)
                    {
                        slot.cache_tokens.clear();
                        slot.n_past    = 0;
                        slot.n_past_se = 0;
                    }
                }

                slot->reset();

                slot->infill       = task.infill_mode;
                slot->embedding    = task.embedding_mode;
                slot->task_id      = task.id;
                slot->multitask_id = task.multitask_id;

                if (!launch_slot_with_data(slot, task.data))
                {
                    // send error result
                    send_error(task, "internal_error");
                    break;
                }
            } break;
            case TASK_TYPE_CANCEL: { // release slot linked with the task id
                for (auto & slot : slots)
                {
                    if (slot.task_id == task.target_id)
                    {
                        slot.release();
                        break;
                    }
                }
            } break;
            case TASK_TYPE_NEXT_RESPONSE: {
                // do nothing
            } break;
        }
    }
    void on_finish_multitask(task_multi& multitask)
    {
        // all subtasks done == multitask is done
        task_result result;
        result.id    = multitask.id;
        result.stop  = true;
        result.error = false;

        // collect json results into one json result
        std::vector<json> result_jsons;
        for (auto& subres : multitask.results)
        {
            result_jsons.push_back(subres.result_json);
            // the aggregated task is an error if any subtask errored
            result.error = result.error || subres.error;
        }
        result.result_json = json{ { "results", result_jsons } };
        queue_results.send(result);
    }
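
    // main slot update loop: refresh the system prompt if needed, apply context
    // shifting / Self-Extend, ingest pending prompts in chunks of n_batch, run
    // llama_decode and sample one token for every slot that is generating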
    bool update_slots() {
        if (system_need_update)
        {
            LOG_TEE("updating system prompt\n");
            update_system_prompt();
        }

        llama_batch_clear(batch);

        if (all_slots_are_idle)
        {
            if (system_prompt.empty() && clean_kv_cache)
            {
                LOG_TEE("all slots are idle and system prompt is empty, clear the KV cache\n");
                kv_cache_clear();
            }
            return true;
        }

        task_server task;
        task.type = TASK_TYPE_NEXT_RESPONSE;
        task.target_id = -1;
        queue_tasks.post(task);

        for (llama_client_slot &slot : slots)
        {
            if (slot.ga_n == 1)
            {
                if (slot.is_processing() && system_tokens.size() + slot.cache_tokens.size() >= (size_t) slot.n_ctx)
                {
                    // Shift context
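                    // the cache is full: keep the first n_keep tokens, discard half of
                    // the remaining ones (n_discard) and shift the rest back so that
                    // generation can continue within the context window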
                    const int n_left    = system_tokens.size() + slot.n_past - slot.params.n_keep - 1;
                    const int n_discard = n_left / 2;

                    LOG_TEE("slot %d: context shift - n_keep = %d, n_left = %d, n_discard = %d\n", slot.id, slot.params.n_keep, n_left, n_discard);
                    llama_kv_cache_seq_rm   (ctx, slot.id, slot.params.n_keep + 1            , slot.params.n_keep + n_discard + 1);
                    llama_kv_cache_seq_shift(ctx, slot.id, slot.params.n_keep + 1 + n_discard, system_tokens.size() + slot.n_past, -n_discard);

                    for (size_t i = slot.params.n_keep + 1 + n_discard; i < slot.cache_tokens.size(); i++)
                    {
                        slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
                    }

                    slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);

                    slot.n_past -= n_discard;

                    slot.truncated = true;

                    LOG_VERBOSE("context shift", {
                        { "n_ctx",  n_ctx },
                        { "n_keep", params.n_keep },
                        { "n_left", n_left },
                    });
                }
            }
        }
        // decode any currently ongoing sequences
        for (auto & slot : slots)
        {
            // release the slot
            if (slot.command == RELEASE)
            {
                slot.state = IDLE;
                slot.command = NONE;
                slot.t_last_used = ggml_time_us();

                LOG_TEE("slot %d released (%d tokens in cache)\n", slot.id, (int) slot.cache_tokens.size());
                queue_tasks.notify_slot_changed();

                continue;
            }

            if (slot.state == IDLE)
            {
                continue;
            }

            slot.i_batch = batch.n_tokens;

            const int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;

            // TODO: we always have to take into account the "system_tokens"
            //       this is not great and needs to be improved somehow
            llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id }, true);

            slot.n_past += 1;
        }
        // process in chunks of params.n_batch
        int32_t n_batch = params.n_batch;

        // assign workload to the slots
        if (params.cont_batching || batch.n_tokens == 0)
        {
            for (auto & slot : slots)
            {
                const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();

                // empty prompt passed -> release the slot and send empty response
                // note: infill mode allows empty prompt
                if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt && !slot.infill)
                {
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    continue;
                }

                // need to process the prompt
                if (slot.state == IDLE && slot.command == LOAD_PROMPT)
                {
                    slot.state = PROCESSING;
                    slot.command = NONE;
                    std::vector<llama_token> prompt_tokens;
                    slot.t_start_process_prompt = ggml_time_us();
                    slot.t_start_genereration = 0;

                    if (slot.infill)
                    {
                        bool suff_rm_leading_spc = true;
                        if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1)
                        {
                            params.input_suffix.erase(0, 1);
                            suff_rm_leading_spc = false;
                        }
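
                        // build the fill-in-the-middle prompt; the layout below is:
                        //   BOS, prefix token, prefix text, suffix token, suffix text, middle token
                        // and the model is expected to generate the "middle" part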
                        auto prefix_tokens = tokenize(slot.params.input_prefix, false);
                        auto suffix_tokens = tokenize(slot.params.input_suffix, false);

                        const int space_token = 29871; // TODO: this should not be hardcoded
                        if (suff_rm_leading_spc && !suffix_tokens.empty() && suffix_tokens[0] == space_token) {
                            suffix_tokens.erase(suffix_tokens.begin());
                        }

                        prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
                        prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
                        prefix_tokens.insert(prefix_tokens.end(),   llama_token_suffix(model));
                        prefix_tokens.insert(prefix_tokens.end(),   suffix_tokens.begin(), suffix_tokens.end());
                        prefix_tokens.push_back(llama_token_middle(model));
                        prompt_tokens = prefix_tokens;
                    }
                    else
                    {
                        prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS if there isn't system prompt
                    }

                    slot.num_prompt_tokens = prompt_tokens.size();

                    if (slot.params.n_keep < 0)
                    {
                        slot.params.n_keep = slot.num_prompt_tokens;
                    }
                    slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);

                    // if input prompt is too big, truncate it
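                    // keep the first n_keep tokens and the most recent tokens, dropping
                    // whole blocks of size n_left/2 from the middle of the prompt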
                    if (slot.num_prompt_tokens >= slot.n_ctx)
                    {
                        const int n_left = slot.n_ctx - slot.params.n_keep;
                        const int n_block_size = n_left / 2;
                        const int erased_blocks = (slot.num_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;

                        std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + slot.params.n_keep);
                        new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size, prompt_tokens.end());

                        LOG_VERBOSE("input truncated", {
                            {"n_ctx",      slot.n_ctx},
                            {"n_keep",     slot.params.n_keep},
                            {"n_left",     n_left},
                            {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
                        });
                        slot.truncated = true;
                        prompt_tokens = new_tokens;

                        slot.num_prompt_tokens = prompt_tokens.size();
                        GGML_ASSERT(slot.num_prompt_tokens < slot.n_ctx);
                    }
                    if (!slot.params.cache_prompt)
                    {
                        llama_sampling_reset(slot.ctx_sampling);

                        slot.n_past = 0;
                        slot.n_past_se = 0;
                        slot.ga_i = 0;
                        slot.num_prompt_tokens_processed = slot.num_prompt_tokens;
                    }
                    else
                    {
                        // push the prompt into the sampling context (do not apply grammar)
                        for (auto &token : prompt_tokens)
                        {
                            llama_sampling_accept(slot.ctx_sampling, ctx, token, false);
                        }
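
                        // reuse the longest common prefix between the cached tokens and
                        // the new prompt - only the remaining part needs to be evaluated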
                        slot.n_past = common_part(slot.cache_tokens, prompt_tokens);
                        slot.num_prompt_tokens_processed = slot.num_prompt_tokens - slot.n_past;

                        if (slot.ga_n != 1)
                        {
                            int ga_i = 0;
                            int32_t ga_n = slot.ga_n;
                            int32_t ga_w = slot.ga_w;
                            int32_t slot_npast = 0;
                            for (int k = 0; k < slot.n_past; ++k)
                            {
                                while (slot_npast >= ga_i + ga_w) {
                                    const int bd = (ga_w/ga_n)*(ga_n - 1);
                                    slot_npast -= bd;
                                    ga_i += ga_w/ga_n;
                                }
                                slot_npast++;
                            }
                            slot.n_past_se = slot_npast;
                            slot.ga_i = ga_i;
                        }

                        LOG_TEE("slot %d : in cache: %i tokens | to process: %i tokens\n", slot.id, slot.n_past, slot.num_prompt_tokens_processed);
                    }
                    slot.cache_tokens = prompt_tokens;

                    if (slot.n_past == slot.num_prompt_tokens && slot.n_past > 0)
                    {
                        // we have to evaluate at least 1 token to generate logits.
                        LOG_TEE("slot %d : we have to evaluate at least 1 token to generate logits\n", slot.id);
                        slot.n_past--;
                        if (slot.ga_i > 0)
                        {
                            slot.n_past_se--;
                        }
                    }

                    LOG_TEE("slot %d : kv cache rm - [%d, end)\n", slot.id, (int) system_tokens.size() + slot.n_past);

                    llama_kv_cache_seq_rm(ctx, slot.id, system_tokens.size() + slot.n_past, -1);

                    LOG_VERBOSE("prompt ingested", {
                        {"n_past",  slot.n_past},
                        {"cached",  tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
                        {"to_eval", tokens_to_str(ctx, slot.cache_tokens.cbegin() + slot.n_past, slot.cache_tokens.cend())},
                    });

                    const bool has_images = process_images(slot);

                    // process the prefix of first image
                    std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, add_bos_token) : prompt_tokens;

                    int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;

                    int32_t ga_i = slot.ga_i;
                    int32_t ga_n = slot.ga_n;
                    int32_t ga_w = slot.ga_w;

                    for (; slot.n_past < (int) prefix_tokens.size(); ++slot.n_past)
                    {
                        if (slot.ga_n != 1)
                        {
                            while (slot_npast >= ga_i + ga_w) {
                                const int bd = (ga_w/ga_n)*(ga_n - 1);
                                slot_npast -= bd;
                                ga_i += ga_w/ga_n;
                            }
                        }
                        llama_batch_add(batch, prefix_tokens[slot.n_past], system_tokens.size() + slot_npast, { slot.id }, false);
                        slot_npast++;
                    }

                    if (has_images && !ingest_images(slot, n_batch))
                    {
                        LOG_TEE("failed processing images\n");
                        return false;
                    }

                    // extract the logits only for the last token
                    if (batch.n_tokens > 0)
                    {
                        batch.logits[batch.n_tokens - 1] = true;
                    }

                    slot.n_decoded = 0;
                    slot.i_batch   = batch.n_tokens - 1;
                }
            }
        }
        }

        if (batch.n_tokens == 0)
        {
            all_slots_are_idle = true;
            return true;
        }

        for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
        {
            const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));

            for (auto & slot : slots)
            {
                if (slot.ga_n != 1)
                {
                    // context extension via Self-Extend
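                    // group attention: positions inside each window of ga_w tokens are
                    // compressed by a factor of ga_n; whenever n_past_se runs past the
                    // current group, shift the following tokens back (seq_shift), divide
                    // the positions of the group (seq_div) and realign the tail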
                    while (slot.n_past_se >= slot.ga_i + slot.ga_w)
                    {
                        const int ib = (slot.ga_n * slot.ga_i) / slot.ga_w;
                        const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1);
                        const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w;

                        LOG_TEE("\n");
                        LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd);
                        LOG_TEE("div:   [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n);
                        LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd);

                        llama_kv_cache_seq_shift(ctx, slot.id, slot.ga_i, slot.n_past_se, ib * bd);
                        llama_kv_cache_seq_div(ctx, slot.id, slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n);
                        llama_kv_cache_seq_shift(ctx, slot.id, slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd);

                        slot.n_past_se -= bd;

                        slot.ga_i += slot.ga_w / slot.ga_n;

                        LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i);
                    }
                    slot.n_past_se += n_tokens;
                }
            }
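
            // create a view into the current chunk of the batch - no copies, just
            // offset pointers into the shared llama_batch buffers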
            llama_batch batch_view =
            {
                n_tokens,
                batch.token    + i,
                nullptr,
                batch.pos      + i,
                batch.n_seq_id + i,
                batch.seq_id   + i,
                batch.logits   + i,
                0, 0, 0, // unused
            };

            const int ret = llama_decode(ctx, batch_view);
            if (ret != 0)
            {
                if (n_batch == 1 || ret < 0)
                {
                    // if you get here, it means the KV cache is full - try increasing it via the context size
                    LOG_TEE("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
                    return false;
                }

                LOG_TEE("%s : failed to find free space in the KV cache, retrying with smaller n_batch = %d\n", __func__, n_batch / 2);

                // retry with half the batch size to try to find a free slot in the KV cache
                n_batch /= 2;
                i -= n_batch;
                continue;
            }
            for (auto & slot : slots)
            {
                if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens))
                {
                    continue;
                }

                // prompt evaluated for embedding
                if (slot.embedding)
                {
                    send_embedding(slot);
                    slot.release();
                    slot.i_batch = -1;
                    return true;
                }

                completion_token_output result;
                const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i);

                llama_sampling_accept(slot.ctx_sampling, ctx, id, true);

                slot.n_decoded += 1;
                if (slot.n_decoded == 1)
                {
                    slot.t_start_genereration = ggml_time_us();
                    slot.t_prompt_processing = (slot.t_start_genereration - slot.t_start_process_prompt) / 1e3;
                }

                llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
                result.tok = id;

                const int32_t n_probs = slot.sparams.n_probs;
                if (slot.sparams.temp <= 0 && n_probs > 0)
                {
                    // for llama_sample_token_greedy we need to sort candidates
                    llama_sample_softmax(ctx, &cur_p);
                }

                for (size_t i = 0; i < std::min(cur_p.size, (size_t) n_probs); ++i)
                {
                    result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
                }

                if (!process_token(result, slot))
                {
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                }

                slot.i_batch = -1;
            }
        }
        return true;
    }
    void run_on_all_tasks_finished() {
        update_slots();
    }
};
static void server_print_usage(const char *argv0, const gpt_params &params,
                               const server_params &sparams)
{
    printf("usage: %s [options]\n", argv0);
    printf("\n");
    printf("options:\n");
    printf("  -h, --help                show this help message and exit\n");
    printf("  -v, --verbose             verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    printf("  -t N, --threads N         number of threads to use during computation (default: %d)\n", params.n_threads);
    printf("  -tb N, --threads-batch N  number of threads to use during batch and prompt processing (default: same as --threads)\n");
    printf("  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
    printf("  --rope-scaling {none,linear,yarn}\n");
    printf("                            RoPE frequency scaling method, defaults to linear unless specified by the model\n");
    printf("  --rope-freq-base N        RoPE base frequency (default: loaded from model)\n");
    printf("  --rope-freq-scale N       RoPE frequency scaling factor, expands context by a factor of 1/N\n");
    printf("  --yarn-ext-factor N       YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n");
    printf("  --yarn-attn-factor N      YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n");
    printf("  --yarn-beta-slow N        YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow);
    printf("  --yarn-beta-fast N        YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast);
    printf("  -b N, --batch-size N      batch size for prompt processing (default: %d)\n", params.n_batch);
    printf("  --memory-f32              use f32 instead of f16 for memory key+value (default: disabled)\n");
    printf("                            not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_supports_mlock())
    {
        printf("  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_supports_mmap())
    {
        printf("  --no-mmap                 do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    printf("  --numa TYPE               attempt optimizations that help on some NUMA systems\n");
    printf("                              - distribute: spread execution evenly over all nodes\n");
    printf("                              - isolate: only spawn threads on CPUs on the node that execution started on\n");
    printf("                              - numactl: use the CPU map provided by numactl\n");
    if (llama_supports_gpu_offload()) {
        printf("  -ngl N, --n-gpu-layers N\n");
        printf("                            number of layers to store in VRAM\n");
        printf("  -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
        printf("                            how to split the model across multiple GPUs, one of:\n");
        printf("                              - none: use one GPU only\n");
        printf("                              - layer (default): split layers and KV across GPUs\n");
        printf("                              - row: split rows across GPUs\n");
        printf("  -ts SPLIT, --tensor-split SPLIT\n");
        printf("                            fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
        printf("  -mg i, --main-gpu i       the GPU to use for the model (with split-mode = none),\n");
        printf("                            or for intermediate results and KV (with split-mode = row)\n");
    }
    printf("  -m FNAME, --model FNAME\n");
    printf("                            model path (default: %s)\n", params.model.c_str());
    printf("  -a ALIAS, --alias ALIAS\n");
    printf("                            set an alias for the model, will be added as `model` field in completion response\n");
    printf("  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
    printf("  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
    printf("  --host                    ip address to listen (default: %s)\n", sparams.hostname.c_str());
    printf("  --port PORT               port to listen (default: %d)\n", sparams.port);
    printf("  --path PUBLIC_PATH        path from which to serve static files (default: %s)\n", sparams.public_path.c_str());
    printf("  --api-key API_KEY         optional api key to enhance server security. If set, requests must include this key for access.\n");
    printf("  --api-key-file FNAME      path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
    printf("  -to N, --timeout N        server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    printf("  --embedding               enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    printf("  -np N, --parallel N       number of slots for processing requests (default: %d)\n", params.n_parallel);
    printf("  -cb, --cont-batching      enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
    printf("  -spf FNAME, --system-prompt-file FNAME\n");
    printf("                            set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
    printf("  --mmproj MMPROJ_FILE      path to a multimodal projector file for LLaVA.\n");
    printf("  --log-disable             disables logging to a file.\n");
    printf("  --slots-endpoint-disable  disables slots monitoring endpoint.\n");
    printf("\n");
    printf("  -n, --n-predict           maximum tokens to predict (default: %d)\n", params.n_predict);
    printf("  --override-kv KEY=TYPE:VALUE\n");
    printf("                            advanced option to override model metadata by key. may be specified multiple times.\n");
    printf("                            types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
    printf("  -gan N, --grp-attn-n N    set the group attention factor to extend context size through self-extend (default: 1 = disabled), used together with group attention width `--grp-attn-w`\n");
    printf("  -gaw N, --grp-attn-w N    set the group attention width to extend context size through self-extend (default: 512), used together with group attention factor `--grp-attn-n`\n");
    printf("  --chat-template FORMAT_NAME\n");
    printf("                            set chat template, possible values are: llama2, chatml (default: %s)\n", sparams.chat_template.c_str());
    printf("\n");
}
static void server_params_parse(int argc, char **argv, server_params &sparams,
                                gpt_params &params, llama_server_context& llama)
{
    gpt_params default_params;
    server_params default_sparams;
    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++)
    {
        arg = argv[i];
        if (arg == "--port")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        }
        else if (arg == "--host")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        }
        else if (arg == "--path")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.public_path = argv[i];
        }
        else if (arg == "--api-key")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.api_keys.emplace_back(argv[i]);
        }
        else if (arg == "--api-key-file")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::ifstream key_file(argv[i]);
            if (!key_file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (key.size() > 0) {
                    sparams.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
        else if (arg == "--timeout" || arg == "-to")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.read_timeout  = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        }
        else if (arg == "-m" || arg == "--model")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        }
        else if (arg == "-a" || arg == "--alias")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        }
        else if (arg == "-h" || arg == "--help")
        {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        }
        else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        }
        else if (arg == "--rope-scaling")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::string value(argv[i]);
            /**/ if (value == "none")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_NONE; }
            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_LINEAR; }
            else if (value == "yarn")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_YARN; }
            else { invalid_param = true; break; }
        }
        else if (arg == "--rope-freq-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_base = std::stof(argv[i]);
        }
        else if (arg == "--rope-freq-scale")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = std::stof(argv[i]);
        }
        else if (arg == "--yarn-ext-factor")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_ext_factor = std::stof(argv[i]);
        }
        else if (arg == "--yarn-attn-factor")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_attn_factor = std::stof(argv[i]);
        }
        else if (arg == "--yarn-beta-fast")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_beta_fast = std::stof(argv[i]);
        }
        else if (arg == "--yarn-beta-slow")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_beta_slow = std::stof(argv[i]);
        }
        else if (arg == "--threads" || arg == "-t")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        }
        else if (arg == "--grp-attn-n" || arg == "-gan")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.grp_attn_n = std::stoi(argv[i]);
        }
        else if (arg == "--grp-attn-w" || arg == "-gaw")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.grp_attn_w = std::stoi(argv[i]);
        }
        else if (arg == "--threads-batch" || arg == "-tb")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads_batch = std::stoi(argv[i]);
        }
        else if (arg == "-b" || arg == "--batch-size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        }
        else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            if (llama_supports_gpu_offload()) {
                params.n_gpu_layers = std::stoi(argv[i]);
            } else {
                LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                            "See main README.md for information on enabling GPU BLAS support",
                            {{"n_gpu_layers", params.n_gpu_layers}});
            }
        }
        else if (arg == "--split-mode" || arg == "-sm")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::string arg_next = argv[i];
            if (arg_next == "none")
            {
                params.split_mode = LLAMA_SPLIT_NONE;
            }
            else if (arg_next == "layer")
            {
                params.split_mode = LLAMA_SPLIT_LAYER;
            }
            else if (arg_next == "row")
            {
                params.split_mode = LLAMA_SPLIT_ROW;
            }
            else {
                invalid_param = true;
                break;
            }
#ifndef GGML_USE_CUBLAS
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--tensor-split" || arg == "-ts")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= llama_max_devices());

            for (size_t i_device = 0; i_device < llama_max_devices(); ++i_device)
            {
                if (i_device < split_arg.size())
                {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else
                {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--no-mul-mat-q" || arg == "-nommq")
        {
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
            params.mul_mat_q = false;
#else
            LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--main-gpu" || arg == "-mg")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
#endif
        }
        else if (arg == "--lora")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.emplace_back(argv[i], 1.0f);
            params.use_mmap = false;
        }
        else if (arg == "--lora-scaled")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            const char * lora_adapter = argv[i];
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
            params.use_mmap = false;
        }
        else if (arg == "--lora-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        }
        else if (arg == "-v" || arg == "--verbose")
        {
#if SERVER_VERBOSE != 1
            LOG_WARNING("server.cpp is not built with verbose logging.", {});
#else
            server_verbose = true;
#endif
        }
        else if (arg == "--mlock")
        {
            params.use_mlock = true;
        }
        else if (arg == "--no-mmap")
        {
            params.use_mmap = false;
        }
        else if (arg == "--numa") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            } else {
                std::string value(argv[i]);
                /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
                else if (value == "isolate")                   { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
                else if (value == "numactl")                   { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
                else { invalid_param = true; break; }
            }
        }
        else if (arg == "--embedding")
        {
            params.embedding = true;
        }
        else if (arg == "-cb" || arg == "--cont-batching")
        {
            params.cont_batching = true;
        }
        else if (arg == "-np" || arg == "--parallel")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_parallel = std::stoi(argv[i]);
        }
        else if (arg == "-n" || arg == "--n-predict")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_predict = std::stoi(argv[i]);
        }
        else if (arg == "-spf" || arg == "--system-prompt-file")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::ifstream file(argv[i]);
            if (!file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::string system_content;
            std::copy(
                std::istreambuf_iterator<char>(file),
                std::istreambuf_iterator<char>(),
                std::back_inserter(system_content)
            );
            llama.process_system_prompt_data(json::parse(system_content));
        }
        else if (arg == "--mmproj")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.mmproj = argv[i];
        }
        else if (arg == "--log-disable")
        {
            log_set_target(stdout);
            LOG_INFO("logging to file is disabled.", {});
        }
        else if (arg == "--slots-endpoint-disable")
        {
            sparams.slots_endpoint = false;
        }
        else if (arg == "--chat-template")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::string value(argv[i]);
            if (value != "chatml" && value != "llama2") {
                fprintf(stderr, "error: chat template can be \"llama2\" or \"chatml\", but got: %s\n", value.c_str());
                invalid_param = true;
                break;
            }
            sparams.chat_template = value;
        }
        else if (arg == "--override-kv")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
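            // expected format: KEY=TYPE:VALUE with TYPE one of int, float, bool,
            // e.g. --override-kv tokenizer.ggml.add_bos_token=bool:false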
            char * sep = strchr(argv[i], '=');
            if (sep == nullptr || sep - argv[i] >= 128) {
                fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
                invalid_param = true;
                break;
            }
            struct llama_model_kv_override kvo;
            std::strncpy(kvo.key, argv[i], sep - argv[i]);
            kvo.key[sep - argv[i]] = 0;
            sep++;
            if (strncmp(sep, "int:", 4) == 0) {
                sep += 4;
                kvo.tag = LLAMA_KV_OVERRIDE_INT;
                kvo.int_value = std::atol(sep);
            } else if (strncmp(sep, "float:", 6) == 0) {
                sep += 6;
                kvo.tag = LLAMA_KV_OVERRIDE_FLOAT;
                kvo.float_value = std::atof(sep);
            } else if (strncmp(sep, "bool:", 5) == 0) {
                sep += 5;
                kvo.tag = LLAMA_KV_OVERRIDE_BOOL;
                if (std::strcmp(sep, "true") == 0) {
                    kvo.bool_value = true;
                } else if (std::strcmp(sep, "false") == 0) {
                    kvo.bool_value = false;
                } else {
                    fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
                    invalid_param = true;
                    break;
                }
            } else {
                fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
                invalid_param = true;
                break;
            }
            params.kv_overrides.push_back(kvo);
        }
        else
        {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
    }

    if (invalid_param)
    {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}
/* llama.cpp completion api semantics */
static json format_partial_response(
    llama_server_context &llama, llama_client_slot *slot, const std::string &content, const std::vector<completion_token_output> &probs
) {
    json res = json
    {
        {"content",    content },
        {"stop",       false},
        {"slot_id",    slot->id },
        {"multimodal", llama.multimodal }
    };

    if (slot->sparams.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(std::string content)
{
    return json {
        {"content", content}
    };
}

static void log_server_request(const httplib::Request &req, const httplib::Response &res)
{
    LOG_INFO("request", {
        {"remote_addr", req.remote_addr},
        {"remote_port", req.remote_port},
        {"status",      res.status},
        {"method",      req.method},
        {"path",        req.path},
        {"params",      req.params},
    });

    LOG_VERBOSE("request", {
        {"request",  req.body},
        {"response", res.body},
    });
}
struct token_translator
{
    llama_context * ctx;
    std::string operator()(llama_token tok)                    const { return llama_token_to_piece(ctx, tok); }
    std::string operator()(const completion_token_output &cto) const { return (*this)(cto.tok); }
};

static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama, llama_client_slot *slot)
{
    auto & gtps = slot->generated_token_probs;
    auto translator = token_translator{llama.ctx};
    auto add_strlen = [=](size_t sum, const completion_token_output & cto) { return sum + translator(cto).size(); };
    const size_t len = std::accumulate(gtps.begin(), gtps.end(), size_t(0), add_strlen);
    if (slot->generated_text.capacity() < slot->generated_text.size() + len)
    {
        slot->generated_text.reserve(slot->generated_text.size() + len);
    }
    for (const completion_token_output & cto : gtps)
    {
        slot->generated_text += translator(cto);
    }
}

std::function<void(int)> shutdown_handler;
inline void signal_handler(int signal) { shutdown_handler(signal); }
int main(int argc, char **argv)
{
#if SERVER_VERBOSE != 1
    log_disable();
#endif
    // own arguments required by this example
    gpt_params params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params, llama);

    if (params.model_alias == "unknown")
    {
        params.model_alias = params.model;
    }

    llama_backend_init();
    llama_numa_init(params.numa);

    LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER},
                            {"commit", LLAMA_COMMIT}});

    LOG_INFO("system info", {
        {"n_threads",       params.n_threads},
        {"n_threads_batch", params.n_threads_batch},
        {"total_threads",   std::thread::hardware_concurrency()},
        {"system_info",     llama_print_system_info()},
    });

    httplib::Server svr;

    std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};

    svr.set_default_headers({{"Server", "llama.cpp"}});

    // CORS preflight
    svr.Options(R"(.*)", [](const httplib::Request &req, httplib::Response &res) {
        res.set_header("Access-Control-Allow-Origin",      req.get_header_value("Origin"));
        res.set_header("Access-Control-Allow-Credentials", "true");
        res.set_header("Access-Control-Allow-Methods",     "POST");
        res.set_header("Access-Control-Allow-Headers",     "*");
    });
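
    // health check: 200 when the model is loaded and a slot can accept work,
    // 503 while loading or when all slots are busy, 500 if the model failed to load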
  2299. svr.Get("/health", [&](const httplib::Request&, httplib::Response& res) {
  2300. server_state current_state = state.load();
  2301. switch(current_state) {
  2302. case SERVER_STATE_READY:
  2303. if (llama.all_slots_are_idle) {
  2304. res.set_content(R"({"status": "ok"})", "application/json");
  2305. res.status = 200; // HTTP OK
  2306. } else {
  2307. int available_slots = 0;
  2308. int processing_slots = 0;
  2309. for (llama_client_slot & slot : llama.slots) {
  2310. if (slot.available()) {
  2311. available_slots++;
  2312. } else {
  2313. processing_slots++;
  2314. }
  2315. }
  2316. if (available_slots > 0) {
  2317. json health = {
  2318. {"status", "ok"},
  2319. {"slots_idle", available_slots},
  2320. {"slots_processing", processing_slots}};
  2321. res.set_content(health.dump(), "application/json");
  2322. res.status = 200; // HTTP OK
  2323. } else {
  2324. json health = {
  2325. {"status", "no slot available"},
  2326. {"slots_idle", available_slots},
  2327. {"slots_processing", processing_slots}};
  2328. res.set_content(health.dump(), "application/json");
  2329. res.status = 503; // HTTP Service Unavailable
  2330. }
  2331. }
  2332. break;
  2333. case SERVER_STATE_LOADING_MODEL:
  2334. res.set_content(R"({"status": "loading model"})", "application/json");
  2335. res.status = 503; // HTTP Service Unavailable
  2336. break;
  2337. case SERVER_STATE_ERROR:
  2338. res.set_content(R"({"status": "error", "error": "Model failed to load"})", "application/json");
  2339. res.status = 500; // HTTP Internal Server Error
  2340. break;
  2341. }
  2342. });
    if (sparams.slots_endpoint) {
        svr.Get("/slots", [&](const httplib::Request&, httplib::Response& res) {
            json slots;
            for (llama_client_slot & slot : llama.slots) {
                json slot_data = llama.get_formated_generation(slot);
                slot_data["id"]      = slot.id;
                slot_data["task_id"] = slot.task_id;
                slot_data["state"]   = slot.state;
                slot_data["prompt"]  = slot.prompt;
                slot_data["next_token"] = {
                    {"has_next_token",       slot.has_next_token},
                    {"n_remain",             slot.n_remaining},
                    {"num_tokens_predicted", slot.n_decoded},
                    {"stopped_eos",          slot.stopped_eos},
                    {"stopped_word",         slot.stopped_word},
                    {"stopped_limit",        slot.stopped_limit},
                    {"stopping_word",        slot.stopping_word},
                };
                slots.push_back(slot_data);
            }
            res.set_content(slots.dump(), "application/json");
            res.status = 200; // HTTP OK
        });
    }
    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const httplib::Request &, httplib::Response &res, std::exception_ptr ep)
            {
                const char fmt[] = "500 Internal Server Error\n%s";
                char buf[BUFSIZ];
                try
                {
                    std::rethrow_exception(std::move(ep));
                }
                catch (std::exception &e)
                {
                    snprintf(buf, sizeof(buf), fmt, e.what());
                }
                catch (...)
                {
                    snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
                }
                res.set_content(buf, "text/plain; charset=utf-8");
                res.status = 500;
            });

    svr.set_error_handler([](const httplib::Request &, httplib::Response &res)
            {
                if (res.status == 401)
                {
                    res.set_content("Unauthorized", "text/plain; charset=utf-8");
                }
                else if (res.status == 400)
                {
                    res.set_content("Invalid request", "text/plain; charset=utf-8");
                }
                else if (res.status == 404)
                {
                    res.set_content("File Not Found", "text/plain; charset=utf-8");
                    res.status = 404;
                }
            });
    // set timeouts and change hostname and port
    svr.set_read_timeout (sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port))
    {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    // to make it ctrl+clickable:
    LOG_TEE("\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);

    std::unordered_map<std::string, std::string> log_data;
    log_data["hostname"] = sparams.hostname;
    log_data["port"]     = std::to_string(sparams.port);

    if (sparams.api_keys.size() == 1) {
        log_data["api_key"] = "api_key: ****" + sparams.api_keys[0].substr(sparams.api_keys[0].length() - 4);
    } else if (sparams.api_keys.size() > 1) {
        log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
    }

    LOG_INFO("HTTP server listening", log_data);

    // run the HTTP server in a thread - see comment below
    std::thread t([&]()
            {
                if (!svr.listen_after_bind())
                {
                    state.store(SERVER_STATE_ERROR);
                    return 1;
                }
                return 0;
            });

    // load the model
    if (!llama.load_model(params))
    {
        state.store(SERVER_STATE_ERROR);
        return 1;
    } else {
        llama.initialize();
        state.store(SERVER_STATE_READY);
        LOG_INFO("model loaded", {});
    }

    // Middleware for API key validation
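    // when api keys are configured, every protected endpoint expects an
    // "Authorization: Bearer <key>" header matching one of the loaded keys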
    auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
        // If API key is not set, skip validation
        if (sparams.api_keys.empty()) {
            return true;
        }

        // Check for API key in the header
        auto auth_header = req.get_header_value("Authorization");

        std::string prefix = "Bearer ";
        if (auth_header.substr(0, prefix.size()) == prefix) {
            std::string received_api_key = auth_header.substr(prefix.size());
            if (std::find(sparams.api_keys.begin(), sparams.api_keys.end(), received_api_key) != sparams.api_keys.end()) {
                return true; // API key is valid
            }
        }

        // API key is invalid or not provided
        res.set_content("Unauthorized: Invalid API Key", "text/plain; charset=utf-8");
        res.status = 401; // Unauthorized

        LOG_WARNING("Unauthorized: Invalid API Key", {});

        return false;
    };
    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const httplib::Request &, httplib::Response &res)
            {
                res.set_content(reinterpret_cast<const char*>(&index_html), index_html_len, "text/html; charset=utf-8");
                return false;
            });

    // this is only called if no index.js is found in the public --path
    svr.Get("/index.js", [](const httplib::Request &, httplib::Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&index_js), index_js_len, "text/javascript; charset=utf-8");
                return false;
            });

    // this is only called if no completion.js is found in the public --path
    svr.Get("/completion.js", [](const httplib::Request &, httplib::Response &res)
            {
                res.set_content(reinterpret_cast<const char*>(&completion_js), completion_js_len, "application/javascript; charset=utf-8");
                return false;
            });

    // this is only called if no json-schema-to-grammar.mjs is found in the public --path
    svr.Get("/json-schema-to-grammar.mjs", [](const httplib::Request &, httplib::Response &res)
            {
                res.set_content(reinterpret_cast<const char*>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript; charset=utf-8");
                return false;
            });

    svr.Get("/props", [&llama](const httplib::Request & req, httplib::Response &res)
            {
                res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
                json data = {
                    { "user_name",                   llama.name_user.c_str() },
                    { "assistant_name",              llama.name_assistant.c_str() },
                    { "default_generation_settings", llama.default_generation_settings_for_props },
                    { "total_slots",                 llama.params.n_parallel }
                };
                res.set_content(data.dump(), "application/json; charset=utf-8");
            });
  2500. svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
  2501. {
  2502. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2503. if (!validate_api_key(req, res)) {
  2504. return;
  2505. }
  2506. json data = json::parse(req.body);
  2507. const int task_id = llama.queue_tasks.get_new_id();
  2508. llama.queue_results.add_waiting_task_id(task_id);
  2509. llama.request_completion(task_id, data, false, false, -1);
  2510. if (!json_value(data, "stream", false)) {
  2511. std::string completion_text;
  2512. task_result result = llama.queue_results.recv(task_id);
  2513. if (!result.error && result.stop) {
  2514. res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
  2515. }
  2516. else
  2517. {
  2518. res.status = 404;
  2519. res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
  2520. }
  2521. llama.queue_results.remove_waiting_task_id(task_id);
  2522. } else {
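                    // streaming: results are pushed to the client as server-sent
                    // events, one "data: {...}\n\n" chunk per partial result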
                    const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink)
                    {
                        while (true)
                        {
                            task_result result = llama.queue_results.recv(task_id);
                            if (!result.error) {
                                const std::string str =
                                    "data: " +
                                    result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
                                    "\n\n";
                                LOG_VERBOSE("data stream", {
                                    { "to_send", str }
                                });
                                if (!sink.write(str.c_str(), str.size()))
                                {
                                    llama.queue_results.remove_waiting_task_id(task_id);
                                    return false;
                                }
                                if (result.stop) {
                                    break;
                                }
                            } else {
                                const std::string str =
                                    "error: " +
                                    result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
                                    "\n\n";
                                LOG_VERBOSE("data stream", {
                                    { "to_send", str }
                                });
                                if (!sink.write(str.c_str(), str.size()))
                                {
                                    llama.queue_results.remove_waiting_task_id(task_id);
                                    return false;
                                }
                                break;
                            }
                        }

                        llama.queue_results.remove_waiting_task_id(task_id);
                        sink.done();
                        return true;
                    };

                    auto on_complete = [task_id, &llama] (bool)
                    {
                        // cancel
                        llama.request_cancel(task_id);
                        llama.queue_results.remove_waiting_task_id(task_id);
                    };

                    res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
                }
            });
  2573. svr.Get("/v1/models", [&params](const httplib::Request& req, httplib::Response& res)
  2574. {
  2575. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2576. std::time_t t = std::time(0);
  2577. json models = {
  2578. {"object", "list"},
  2579. {"data", {
  2580. {
  2581. {"id", params.model_alias},
  2582. {"object", "model"},
  2583. {"created", t},
  2584. {"owned_by", "llamacpp"}
  2585. },
  2586. }}
  2587. };
  2588. res.set_content(models.dump(), "application/json; charset=utf-8");
  2589. });
    // TODO: add mount point without "/v1" prefix -- how?
    svr.Post("/v1/chat/completions", [&llama, &validate_api_key, &sparams](const httplib::Request &req, httplib::Response &res)
            {
                res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
                if (!validate_api_key(req, res)) {
                    return;
                }
                json data = oaicompat_completion_params_parse(json::parse(req.body), sparams.chat_template);

                const int task_id = llama.queue_tasks.get_new_id();
                llama.queue_results.add_waiting_task_id(task_id);
                llama.request_completion(task_id, data, false, false, -1);

                if (!json_value(data, "stream", false)) {
                    std::string completion_text;
                    task_result result = llama.queue_results.recv(task_id);

                    if (!result.error && result.stop) {
                        json oaicompat_result = format_final_response_oaicompat(data, result);

                        res.set_content(oaicompat_result.dump(-1, ' ', false,
                                            json::error_handler_t::replace),
                                        "application/json; charset=utf-8");
                    } else {
                        res.status = 500;
                        res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
                    }
                    llama.queue_results.remove_waiting_task_id(task_id);
                } else {
                    const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink &sink) {
                        while (true) {
                            task_result llama_result = llama.queue_results.recv(task_id);
                            if (!llama_result.error) {
                                std::vector<json> result_array = format_partial_response_oaicompat(llama_result);

                                for (auto it = result_array.begin(); it != result_array.end(); ++it)
                                {
                                    if (!it->empty()) {
                                        const std::string str =
                                            "data: " +
                                            it->dump(-1, ' ', false, json::error_handler_t::replace) +
                                            "\n\n";
                                        LOG_VERBOSE("data stream", {{"to_send", str}});
                                        if (!sink.write(str.c_str(), str.size())) {
                                            llama.queue_results.remove_waiting_task_id(task_id);
                                            return false;
                                        }
                                    }
                                }
                                if (llama_result.stop) {
                                    break;
                                }
                            } else {
                                const std::string str =
                                    "error: " +
                                    llama_result.result_json.dump(-1, ' ', false,
                                                                  json::error_handler_t::replace) +
                                    "\n\n";
                                LOG_VERBOSE("data stream", {{"to_send", str}});
                                if (!sink.write(str.c_str(), str.size())) {
                                    llama.queue_results.remove_waiting_task_id(task_id);
                                    return false;
                                }
                                break;
                            }
                        }
                        sink.done();
                        llama.queue_results.remove_waiting_task_id(task_id);
                        return true;
                    };

                    auto on_complete = [task_id, &llama](bool) {
                        // cancel request
                        llama.request_cancel(task_id);
                        llama.queue_results.remove_waiting_task_id(task_id);
                    };

                    res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
                }
            });
  2663. svr.Post("/infill", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
  2664. {
  2665. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2666. if (!validate_api_key(req, res)) {
  2667. return;
  2668. }
  2669. json data = json::parse(req.body);
  2670. const int task_id = llama.queue_tasks.get_new_id();
  2671. llama.queue_results.add_waiting_task_id(task_id);
  2672. llama.request_completion(task_id, data, true, false, -1);
  2673. if (!json_value(data, "stream", false)) {
  2674. std::string completion_text;
  2675. task_result result = llama.queue_results.recv(task_id);
  2676. if (!result.error && result.stop)
  2677. {
  2678. res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
  2679. }
  2680. else
  2681. {
  2682. res.status = 404;
  2683. res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
  2684. }
  2685. llama.queue_results.remove_waiting_task_id(task_id);
  2686. } else {
  2687. const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink) {
  2688. while (true)
  2689. {
  2690. task_result result = llama.queue_results.recv(task_id);
  2691. if (!result.error) {
  2692. const std::string str =
  2693. "data: " +
  2694. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2695. "\n\n";
  2696. LOG_VERBOSE("data stream", {
  2697. { "to_send", str }
  2698. });
  2699. if (!sink.write(str.c_str(), str.size()))
  2700. {
  2701. llama.queue_results.remove_waiting_task_id(task_id);
  2702. return false;
  2703. }
  2704. if (result.stop)
  2705. {
  2706. break;
  2707. }
  2708. }
  2709. else
  2710. {
  2711. break;
  2712. }
  2713. }
  2714. llama.queue_results.remove_waiting_task_id(task_id);
  2715. sink.done();
  2716. return true;
  2717. };
  2718. auto on_complete = [task_id, &llama] (bool)
  2719. {
  2720. // cancel
  2721. llama.request_cancel(task_id);
  2722. };
  2723. res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
  2724. }
  2725. });
  2726. svr.Options(R"(/.*)", [](const httplib::Request &, httplib::Response &res)
  2727. { return res.set_content("", "application/json; charset=utf-8"); });
  2728. svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
  2729. {
  2730. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2731. const json body = json::parse(req.body);
  2732. std::vector<llama_token> tokens;
  2733. if (body.count("content") != 0)
  2734. {
  2735. tokens = llama.tokenize(body["content"], false);
  2736. }
  2737. const json data = format_tokenizer_response(tokens);
  2738. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2739. });
    svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
    {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);
        std::string content;
        if (body.count("tokens") != 0) {
            const std::vector<llama_token> tokens = body["tokens"];
            content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
        }
        const json data = format_detokenized_response(content);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
    });
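    // POST /embedding: return the embedding vector for "content" (and, for
    // multimodal models, optional "image_data"). Example:
    //
    //   curl http://127.0.0.1:8080/embedding -d '{"content": "hello"}'
    //   -> {"embedding": [...]}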
    svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
    {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);
        json prompt;
        if (body.count("content") != 0) {
            prompt = body["content"];
        } else {
            prompt = "";
        }
        json image_data;
        if (body.count("image_data") != 0) {
            image_data = body["image_data"];
        } else {
            image_data = "";
        }
        // create and queue the task; n_predict = 0 so nothing is generated,
        // we only want the embedding of the prompt
        const int task_id = llama.queue_tasks.get_new_id();
        llama.queue_results.add_waiting_task_id(task_id);
        llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
        // get the result
        task_result result = llama.queue_results.recv(task_id);
        llama.queue_results.remove_waiting_task_id(task_id);
        // send the result
        return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
    });
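    // POST /v1/embeddings: OpenAI-compatible variant. "input" may be a single
    // string or an array of strings; an array is processed as a batch, one
    // task per element. The response is shaped by
    // format_embeddings_response_oaicompat, i.e. roughly
    // {"object": "list", "data": [{"embedding": [...], "index": 0, "object": "embedding"}, ...], ...}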
    svr.Post("/v1/embeddings", [&llama](const httplib::Request &req, httplib::Response &res)
    {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);
        json prompt;
        if (body.count("input") != 0) {
            prompt = body["input"];
            // batch: an array input yields one embedding per element
            if (prompt.is_array()) {
                json data = json::array();
                int i = 0;
                for (const json &elem : prompt) {
                    const int task_id = llama.queue_tasks.get_new_id();
                    llama.queue_results.add_waiting_task_id(task_id);
                    llama.request_completion(task_id, { {"prompt", elem}, { "n_predict", 0} }, false, true, -1);
                    // get the result
                    task_result result = llama.queue_results.recv(task_id);
                    llama.queue_results.remove_waiting_task_id(task_id);
                    json embedding = json{
                        {"embedding", json_value(result.result_json, "embedding", json::array())},
                        {"index", i++},
                        {"object", "embedding"}
                    };
                    data.push_back(embedding);
                }
                json result = format_embeddings_response_oaicompat(body, data);
                return res.set_content(result.dump(), "application/json; charset=utf-8");
            }
        } else {
            prompt = "";
        }
        // single input: create and queue one task
        const int task_id = llama.queue_tasks.get_new_id();
        llama.queue_results.add_waiting_task_id(task_id);
        llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}}, false, true, -1);
        // get the result
        task_result result = llama.queue_results.recv(task_id);
        llama.queue_results.remove_waiting_task_id(task_id);
        json data = json::array({json{
            {"embedding", json_value(result.result_json, "embedding", json::array())},
            {"index", 0},
            {"object", "embedding"}
        }});
        json root = format_embeddings_response_oaicompat(body, data);
        // send the result
        return res.set_content(root.dump(), "application/json; charset=utf-8");
    });
    // GG: if I put the main loop inside a thread, it crashes on the first request when built in Debug!?
    //     "Bus error: 10" - this is on macOS; it does not crash on Linux
    //std::thread t2([&]()
    /*{
        bool running = true;
        while (running)
        {
            running = llama.update_slots();
        }
    }*/
    //);
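    // wire the task queue to the server context: new tasks and finished
    // multitasks are dispatched to llama, and multitask updates coming from
    // the results queue are routed back into the task queue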
    llama.queue_tasks.on_new_task(std::bind(
        &llama_server_context::process_single_task, &llama, std::placeholders::_1));
    llama.queue_tasks.on_finish_multitask(std::bind(
        &llama_server_context::on_finish_multitask, &llama, std::placeholders::_1));
    llama.queue_tasks.on_all_tasks_finished(std::bind(
        &llama_server_context::run_on_all_tasks_finished, &llama));
    llama.queue_results.on_multitask_update(std::bind(
        &llama_server_queue::update_multitask,
        &llama.queue_tasks,
        std::placeholders::_1,
        std::placeholders::_2,
        std::placeholders::_3
    ));
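    // on SIGINT (or Ctrl+C on Windows), terminate the task queue; this
    // unblocks start_loop() below so main() can shut down cleanly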
    shutdown_handler = [&](int) {
        llama.queue_tasks.terminate();
    };

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset (&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
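    // this blocks the main thread processing tasks until terminate() is
    // called; meanwhile the HTTP server keeps serving on thread t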
    llama.queue_tasks.start_loop();
    svr.stop();
    t.join();

    llama_backend_free();
    return 0;
}