// server.cpp

#include "common.h"
#include "llama.h"
#include "build-info.h"
#include "grammar-parser.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif

#include "httplib.h"
#include "json.hpp"

// auto generated files (update with ./deps.sh)
#include "index.html.hpp"
#include "index.js.hpp"
#include "completion.js.hpp"
#include "json-schema-to-grammar.mjs.hpp"

#include <cstddef>

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

using namespace httplib;
using json = nlohmann::json;

struct server_params
{
    std::string hostname = "127.0.0.1";
    std::string public_path = "examples/server/public";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
};

// completion token output with probabilities
struct completion_token_output
{
    struct token_prob
    {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
    llama_token tok;
};
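
// length of the common token prefix shared by two sequences
// (used below to figure out how much of the previously evaluated context can be reused)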
static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
{
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
    {
    }
    return i;
}

enum stop_type
{
    STOP_FULL,
    STOP_PARTIAL,
};

static bool ends_with(const std::string &str, const std::string &suffix)
{
    return str.size() >= suffix.size() &&
           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}
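
// if the end of `text` is a prefix of `stop`, return the index in `text` where that
// partial stop string begins; otherwise return std::string::npos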
static size_t find_partial_stop_string(const std::string &stop,
                                       const std::string &text)
{
    if (!text.empty() && !stop.empty())
    {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
        {
            if (stop[char_index] == text_last_char)
            {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial))
                {
                    return text.size() - char_index - 1;
                }
            }
        }
    }
    return std::string::npos;
}

template <class Iter>
static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
{
    std::string ret;
    for (; begin != end; ++begin)
    {
        ret += llama_token_to_piece(ctx, *begin);
    }
    return ret;
}

static void server_log(const char *level, const char *function, int line,
                       const char *message, const nlohmann::ordered_json &extra)
{
    nlohmann::ordered_json log{
        {"timestamp", time(nullptr)},
        {"level", level},
        {"function", function},
        {"line", line},
        {"message", message},
    };

    if (!extra.empty())
    {
        log.merge_patch(extra);
    }

    const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
    printf("%.*s\n", (int)str.size(), str.data());
    fflush(stdout);
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
{
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
    // if the size is 1 and the high bit is set, it's a partial multibyte character
    // (size > 1 means it's already a complete, known piece)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80)
    {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }
    return out;
}

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
{
    json out = json::array();
    for (const auto &prob : probs)
    {
        json probs_for_token = json::array();
        for (const auto &p : prob.probs)
        {
            std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json{
                {"tok_str", tok_str},
                {"prob", p.prob},
            });
        }
        std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json{
            {"content", tok_str},
            {"probs", probs_for_token},
        });
    }
    return out;
}

static bool server_verbose = false;

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                            \
    do                                                                   \
    {                                                                    \
        if (server_verbose)                                              \
        {                                                                \
            server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                                \
    } while (0)
#endif

#define LOG_ERROR(MSG, ...) server_log("ERROR", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)

struct llama_server_context
{
    bool stream = false;
    bool has_next_token = false;
    std::string generated_text;
    std::vector<completion_token_output> generated_token_probs;

    size_t num_prompt_tokens = 0;
    size_t num_tokens_predicted = 0;
    size_t n_past = 0;
    size_t n_remain = 0;

    json prompt;
    std::vector<llama_token> embd;
    std::vector<llama_token> last_n_tokens;

    llama_model *model = nullptr;
    llama_context *ctx = nullptr;
    gpt_params params;

    int n_ctx;

    grammar_parser::parse_state parsed_grammar;
    llama_grammar *grammar = nullptr;

    bool truncated = false;
    bool stopped_eos = false;
    bool stopped_word = false;
    bool stopped_limit = false;
    std::string stopping_word;
    int32_t multibyte_pending = 0;

    std::mutex mutex;

    std::unique_lock<std::mutex> lock()
    {
        return std::unique_lock<std::mutex>(mutex);
    }

    ~llama_server_context()
    {
        if (ctx)
        {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model)
        {
            llama_free_model(model);
            model = nullptr;
        }
    }

    void rewind()
    {
        params.antiprompt.clear();
        params.grammar.clear();
        num_prompt_tokens = 0;
        num_tokens_predicted = 0;
        generated_text = "";
        generated_text.reserve(n_ctx);
        generated_token_probs.clear();
        truncated = false;
        stopped_eos = false;
        stopped_word = false;
        stopped_limit = false;
        stopping_word = "";
        multibyte_pending = 0;
        n_remain = 0;
        n_past = 0;

        if (grammar != nullptr) {
            llama_grammar_free(grammar);
            grammar = nullptr;
        }
    }

    bool loadModel(const gpt_params &params_)
    {
        params = params_;
        std::tie(model, ctx) = llama_init_from_gpt_params(params);
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params_.model}});
            return false;
        }
        n_ctx = llama_n_ctx(ctx);
        last_n_tokens.resize(n_ctx);
        std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
        return true;
    }

    std::vector<llama_token> tokenize(const json &json_prompt, bool add_bos) const
    {
        // If `add_bos` is true, we only add BOS when json_prompt is a string
        // or when the first element of the json_prompt array is a string.
        std::vector<llama_token> prompt_tokens;

        if (json_prompt.is_array())
        {
            bool first = true;
            for (const auto &p : json_prompt)
            {
                if (p.is_string())
                {
                    auto s = p.template get<std::string>();
                    std::vector<llama_token> p;
                    if (first)
                    {
                        p = ::llama_tokenize(ctx, s, add_bos);
                        first = false;
                    }
                    else
                    {
                        p = ::llama_tokenize(ctx, s, false);
                    }
                    prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
                }
                else
                {
                    if (first)
                    {
                        first = false;
                    }
                    prompt_tokens.push_back(p.template get<llama_token>());
                }
            }
        }
        else
        {
            auto s = json_prompt.template get<std::string>();
            prompt_tokens = ::llama_tokenize(ctx, s, add_bos);
        }

        return prompt_tokens;
    }

    bool loadGrammar()
    {
        if (!params.grammar.empty()) {
            parsed_grammar = grammar_parser::parse(params.grammar.c_str());
            // will be empty (default) if there are parse errors
            if (parsed_grammar.rules.empty()) {
                LOG_ERROR("grammar parse error", {{"grammar", params.grammar}});
                return false;
            }
            grammar_parser::print_grammar(stderr, parsed_grammar);

            {
                auto it = params.logit_bias.find(llama_token_eos(ctx));
                if (it != params.logit_bias.end() && it->second == -INFINITY) {
                    LOG_WARNING("EOS token is disabled, which will cause most grammars to fail", {});
                }
            }

            std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
            grammar = llama_grammar_init(
                grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
        }
        return true;
    }

    void loadPrompt()
    {
        auto prompt_tokens = tokenize(prompt, true); // always add BOS

        num_prompt_tokens = prompt_tokens.size();

        if (params.n_keep < 0)
        {
            params.n_keep = (int)num_prompt_tokens;
        }
        params.n_keep = std::min(n_ctx - 4, params.n_keep);

        // if input prompt is too big, truncate like normal
        if (num_prompt_tokens >= (size_t)n_ctx)
        {
            const int n_left = (n_ctx - params.n_keep) / 2;
            std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
            const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
            new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
            std::copy(prompt_tokens.end() - n_ctx, prompt_tokens.end(), last_n_tokens.begin());

            LOG_VERBOSE("input truncated", {
                {"n_ctx", n_ctx},
                {"n_keep", params.n_keep},
                {"n_left", n_left},
                {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
            });

            truncated = true;
            prompt_tokens = new_tokens;
        }
        else
        {
            const size_t ps = num_prompt_tokens;
            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
        }

        // compare the evaluated prompt with the new prompt
        n_past = common_part(embd, prompt_tokens);

        // since #3228 we now have to manually manage the KV cache
        llama_kv_cache_seq_rm(ctx, 0, n_past, params.n_ctx);

        embd = prompt_tokens;
        if (n_past == num_prompt_tokens)
        {
            // we have to evaluate at least 1 token to generate logits.
            n_past--;
        }

        LOG_VERBOSE("prompt ingested", {
            {"n_past", n_past},
            {"cached", tokens_to_str(ctx, embd.cbegin(), embd.cbegin() + n_past)},
            {"to_eval", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
        });

        has_next_token = true;
    }

    void beginCompletion()
    {
        // number of tokens to keep when resetting context
        n_remain = params.n_predict;
        llama_set_rng_seed(ctx, params.seed);
    }

    completion_token_output nextToken()
    {
        completion_token_output result;
        result.tok = -1;

        if (embd.size() >= (size_t)n_ctx)
        {
            // Shift context
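            // the context window is full: keep the first n_keep tokens, drop half of the
            // remaining tokens from the KV cache and shift the rest back so generation can continue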
            const int n_left    = n_past - params.n_keep - 1;
            const int n_discard = n_left/2;

            llama_kv_cache_seq_rm   (ctx, 0, params.n_keep + 1            , params.n_keep + n_discard + 1);
            llama_kv_cache_seq_shift(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);

            for (size_t i = params.n_keep + 1 + n_discard; i < embd.size(); i++)
            {
                embd[i - n_discard] = embd[i];
            }
            embd.resize(embd.size() - n_discard);

            n_past -= n_discard;

            truncated = true;
            LOG_VERBOSE("input truncated", {
                {"n_ctx", n_ctx},
                {"n_keep", params.n_keep},
                {"n_left", n_left},
            });
        }

        while (n_past < embd.size())
        {
            int n_eval = (int)embd.size() - n_past;
            if (n_eval > params.n_batch)
            {
                n_eval = params.n_batch;
            }

            if (llama_decode(ctx, llama_batch_get_one(&embd[n_past], n_eval, n_past, 0)))
            {
                LOG_ERROR("failed to eval", {
                    {"n_eval", n_eval},
                    {"n_past", n_past},
                    {"embd", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
                });
                has_next_token = false;
                return result;
            }
            n_past += n_eval;
        }

        if (params.n_predict == 0)
        {
            has_next_token = false;
            result.tok = llama_token_eos(ctx);
            return result;
        }

        // out of user input, sample next token
        const float temp            = params.temp;
        const int32_t top_k         = params.top_k <= 0 ? llama_n_vocab(model) : params.top_k;
        const float top_p           = params.top_p;
        const float tfs_z           = params.tfs_z;
        const float typical_p       = params.typical_p;
        const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
        const float repeat_penalty  = params.repeat_penalty;
        const float alpha_presence  = params.presence_penalty;
        const float alpha_frequency = params.frequency_penalty;
        const int mirostat          = params.mirostat;
        const float mirostat_tau    = params.mirostat_tau;
        const float mirostat_eta    = params.mirostat_eta;
        const bool penalize_nl      = params.penalize_nl;
        const int32_t n_probs       = params.n_probs;

        {
            auto *logits = llama_get_logits(ctx);
            auto n_vocab = llama_n_vocab(model);

            // Apply params.logit_bias map
            for (const auto &it : params.logit_bias)
            {
                logits[it.first] += it.second;
            }

            std::vector<llama_token_data> candidates;
            candidates.reserve(n_vocab);
            for (llama_token token_id = 0; token_id < n_vocab; token_id++)
            {
                candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
            }

            llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};

            // Apply penalties
            float nl_logit = logits[llama_token_nl(ctx)];
            auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
            llama_sample_repetition_penalty(ctx, &candidates_p,
                                            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                                            last_n_repeat, repeat_penalty);
            llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                                                          last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                                                          last_n_repeat, alpha_frequency, alpha_presence);
            if (!penalize_nl)
            {
                logits[llama_token_nl(ctx)] = nl_logit;
            }

            if (grammar != nullptr) {
                llama_sample_grammar(ctx, &candidates_p, grammar);
            }

            if (temp <= 0)
            {
                // Greedy sampling
                result.tok = llama_sample_token_greedy(ctx, &candidates_p);
                if (n_probs > 0)
                {
                    llama_sample_softmax(ctx, &candidates_p);
                }
            }
            else
            {
                if (mirostat == 1)
                {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    const int mirostat_m = 100;
                    llama_sample_temp(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                }
                else if (mirostat == 2)
                {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    llama_sample_temp(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                }
                else
                {
                    // Temperature sampling
                    size_t min_keep = std::max(1, n_probs);
                    llama_sample_top_k(ctx, &candidates_p, top_k, min_keep);
                    llama_sample_tail_free(ctx, &candidates_p, tfs_z, min_keep);
                    llama_sample_typical(ctx, &candidates_p, typical_p, min_keep);
                    llama_sample_top_p(ctx, &candidates_p, top_p, min_keep);
                    llama_sample_temp(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token(ctx, &candidates_p);
                }
            }

            if (grammar != nullptr) {
                llama_grammar_accept_token(ctx, grammar, result.tok);
            }

            for (size_t i = 0; i < std::min(candidates_p.size, (size_t)n_probs); ++i)
            {
                result.probs.push_back({candidates_p.data[i].id, candidates_p.data[i].p});
            }

            last_n_tokens.erase(last_n_tokens.begin());
            last_n_tokens.push_back(result.tok);
            num_tokens_predicted++;
        }

        // add it to the context
        embd.push_back(result.tok);
        // decrement remaining sampling budget
        --n_remain;

        if (!embd.empty() && embd.back() == llama_token_eos(ctx))
        {
            // stopping_word = llama_token_to_piece(ctx, embd.back());
            has_next_token = false;
            stopped_eos = true;
            LOG_VERBOSE("eos token found", {});
            return result;
        }

        has_next_token = params.n_predict == -1 || n_remain != 0;
        return result;
    }

    size_t findStoppingStrings(const std::string &text, const size_t last_token_size,
                               const stop_type type)
    {
        size_t stop_pos = std::string::npos;
        for (const std::string &word : params.antiprompt)
        {
            size_t pos;
            if (type == STOP_FULL)
            {
                const size_t tmp = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
                pos = text.find(word, from_pos);
            }
            else
            {
                pos = find_partial_stop_string(word, text);
            }
            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos))
            {
                if (type == STOP_FULL)
                {
                    stopping_word = word;
                    stopped_word = true;
                    has_next_token = false;
                }
                stop_pos = pos;
            }
        }
        return stop_pos;
    }

    completion_token_output doCompletion()
    {
        auto token_with_probs = nextToken();

        const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_piece(ctx, token_with_probs.tok);
        generated_text += token_text;

        if (params.n_probs > 0)
        {
            generated_token_probs.push_back(token_with_probs);
        }
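
        // track how many continuation bytes of a UTF-8 multibyte sequence are still
        // missing, so incomplete characters are not flushed to the client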
        if (multibyte_pending > 0)
        {
            multibyte_pending -= token_text.size();
        }
        else if (token_text.size() == 1)
        {
            const char c = token_text[0];
            // 2-byte characters: 110xxxxx 10xxxxxx
            if ((c & 0xE0) == 0xC0)
            {
                multibyte_pending = 1;
                // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
            }
            else if ((c & 0xF0) == 0xE0)
            {
                multibyte_pending = 2;
                // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
            }
            else if ((c & 0xF8) == 0xF0)
            {
                multibyte_pending = 3;
            }
            else
            {
                multibyte_pending = 0;
            }
        }

        if (multibyte_pending > 0 && !has_next_token)
        {
            has_next_token = true;
            n_remain++;
        }

        if (!has_next_token && n_remain == 0)
        {
            stopped_limit = true;
        }

        LOG_VERBOSE("next token", {
            {"token", token_with_probs.tok},
            {"token_text", tokens_to_output_formatted_string(ctx, token_with_probs.tok)},
            {"has_next_token", has_next_token},
            {"n_remain", n_remain},
            {"num_tokens_predicted", num_tokens_predicted},
            {"stopped_eos", stopped_eos},
            {"stopped_word", stopped_word},
            {"stopped_limit", stopped_limit},
            {"stopping_word", stopping_word},
        });

        return token_with_probs;
    }

    std::vector<float> getEmbedding()
    {
        static const int n_embd = llama_n_embd(model);
        if (!params.embedding)
        {
            LOG_WARNING("embedding disabled", {
                {"params.embedding", params.embedding},
            });
            return std::vector<float>(n_embd, 0.0f);
        }
        const float *data = llama_get_embeddings(ctx);
        std::vector<float> embedding(data, data + n_embd);
        return embedding;
    }
};

static void server_print_usage(const char *argv0, const gpt_params &params,
                               const server_params &sparams)
{
    printf("usage: %s [options]\n", argv0);
    printf("\n");
    printf("options:\n");
    printf("  -h, --help                show this help message and exit\n");
    printf("  -v, --verbose             verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    printf("  -t N, --threads N         number of threads to use during computation (default: %d)\n", params.n_threads);
    printf("  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
    printf("  --rope-freq-base N        RoPE base frequency (default: loaded from model)\n");
    printf("  --rope-freq-scale N       RoPE frequency scaling factor (default: loaded from model)\n");
    printf("  -b N, --batch-size N      batch size for prompt processing (default: %d)\n", params.n_batch);
    printf("  --memory-f32              use f32 instead of f16 for memory key+value (default: disabled)\n");
    printf("                            not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_mlock_supported())
    {
        printf("  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_mmap_supported())
    {
        printf("  --no-mmap                 do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    printf("  --numa                    attempt optimizations that help on some NUMA systems\n");
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
    printf("  -ngl N, --n-gpu-layers N\n");
    printf("                            number of layers to store in VRAM\n");
    printf("  -ts SPLIT --tensor-split SPLIT\n");
    printf("                            how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    printf("  -mg i, --main-gpu i       the GPU to use for scratch and small tensors\n");
    printf("  -nommq, --no-mul-mat-q\n");
    printf("                            use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
    printf("                            Not recommended since this is both slower and uses more VRAM.\n");
#endif
    printf("  -m FNAME, --model FNAME\n");
    printf("                            model path (default: %s)\n", params.model.c_str());
    printf("  -a ALIAS, --alias ALIAS\n");
    printf("                            set an alias for the model, will be added as `model` field in completion response\n");
    printf("  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
    printf("  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
    printf("  --host                    ip address to listen (default: %s)\n", sparams.hostname.c_str());
    printf("  --port PORT               port to listen (default: %d)\n", sparams.port);
    printf("  --path PUBLIC_PATH        path from which to serve static files (default: %s)\n", sparams.public_path.c_str());
    printf("  -to N, --timeout N        server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    printf("  --embedding               enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    printf("\n");
}

static void server_params_parse(int argc, char **argv, server_params &sparams,
                                gpt_params &params)
{
    gpt_params default_params;
    server_params default_sparams;
    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++)
    {
        arg = argv[i];
        if (arg == "--port")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        }
        else if (arg == "--host")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        }
        else if (arg == "--path")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.public_path = argv[i];
        }
        else if (arg == "--timeout" || arg == "-to")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.read_timeout = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        }
        else if (arg == "-m" || arg == "--model")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        }
        else if (arg == "-a" || arg == "--alias")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        }
        else if (arg == "-h" || arg == "--help")
        {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        }
        else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        }
        else if (arg == "--rope-freq-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_base = std::stof(argv[i]);
        }
        else if (arg == "--rope-freq-scale")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = std::stof(argv[i]);
        }
        else if (arg == "--memory-f32" || arg == "--memory_f32")
        {
            params.memory_f16 = false;
        }
        else if (arg == "--threads" || arg == "-t")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        }
        else if (arg == "-b" || arg == "--batch-size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        }
        else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
            params.n_gpu_layers = std::stoi(argv[i]);
#else
            LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                        "See main README.md for information on enabling GPU BLAS support",
                        {{"n_gpu_layers", params.n_gpu_layers}});
#endif
        }
        else if (arg == "--tensor-split" || arg == "-ts")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

            for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device)
            {
                if (i_device < split_arg.size())
                {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else
                {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--no-mul-mat-q" || arg == "-nommq")
        {
#ifdef GGML_USE_CUBLAS
            params.mul_mat_q = false;
#else
            LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--main-gpu" || arg == "-mg")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
#endif
        }
        else if (arg == "--lora")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.push_back({argv[i], 1.0f});
            params.use_mmap = false;
        }
        else if (arg == "--lora-scaled")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            const char *lora_adapter = argv[i];
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.push_back({lora_adapter, std::stof(argv[i])});
            params.use_mmap = false;
        }
        else if (arg == "--lora-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        }
        else if (arg == "-v" || arg == "--verbose")
        {
#if SERVER_VERBOSE != 1
            LOG_WARNING("server.cpp is not built with verbose logging.", {});
#else
            server_verbose = true;
#endif
        }
        else if (arg == "--mlock")
        {
            params.use_mlock = true;
        }
        else if (arg == "--no-mmap")
        {
            params.use_mmap = false;
        }
        else if (arg == "--numa")
        {
            params.numa = true;
        }
        else if (arg == "--embedding")
        {
            params.embedding = true;
        }
        else
        {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (invalid_param)
    {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}

static json format_generation_settings(llama_server_context &llama)
{
    const auto eos_bias = llama.params.logit_bias.find(llama_token_eos(llama.ctx));
    const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
                            eos_bias->second < 0.0f && std::isinf(eos_bias->second);

    return json{
        {"n_ctx", llama.n_ctx},
        {"model", llama.params.model_alias},
        {"seed", llama.params.seed},
        {"temp", llama.params.temp},
        {"top_k", llama.params.top_k},
        {"top_p", llama.params.top_p},
        {"tfs_z", llama.params.tfs_z},
        {"typical_p", llama.params.typical_p},
        {"repeat_last_n", llama.params.repeat_last_n},
        {"repeat_penalty", llama.params.repeat_penalty},
        {"presence_penalty", llama.params.presence_penalty},
        {"frequency_penalty", llama.params.frequency_penalty},
        {"mirostat", llama.params.mirostat},
        {"mirostat_tau", llama.params.mirostat_tau},
        {"mirostat_eta", llama.params.mirostat_eta},
        {"penalize_nl", llama.params.penalize_nl},
        {"stop", llama.params.antiprompt},
        {"n_predict", llama.params.n_predict},
        {"n_keep", llama.params.n_keep},
        {"ignore_eos", ignore_eos},
        {"stream", llama.stream},
        {"logit_bias", llama.params.logit_bias},
        {"n_probs", llama.params.n_probs},
        {"grammar", llama.params.grammar},
    };
}

static json format_embedding_response(llama_server_context &llama)
{
    return json{
        {"embedding", llama.getEmbedding()},
    };
}

static json format_timings(llama_server_context &llama)
{
    const auto timings = llama_get_timings(llama.ctx);

    assert(timings.n_eval == ptrdiff_t(llama.num_tokens_predicted));

    return json{
        {"prompt_n", timings.n_p_eval},
        {"prompt_ms", timings.t_p_eval_ms},
        {"prompt_per_token_ms", timings.t_p_eval_ms / timings.n_p_eval},
        {"prompt_per_second", 1e3 / timings.t_p_eval_ms * timings.n_p_eval},

        {"predicted_n", timings.n_eval},
        {"predicted_ms", timings.t_eval_ms},
        {"predicted_per_token_ms", timings.t_eval_ms / timings.n_eval},
        {"predicted_per_second", 1e3 / timings.t_eval_ms * timings.n_eval},
    };
}

static json format_final_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", true},
        {"model", llama.params.model_alias},
        {"tokens_predicted", llama.num_tokens_predicted},
        {"tokens_evaluated", llama.num_prompt_tokens},
        {"generation_settings", format_generation_settings(llama)},
        {"prompt", llama.prompt},
        {"truncated", llama.truncated},
        {"stopped_eos", llama.stopped_eos},
        {"stopped_word", llama.stopped_word},
        {"stopped_limit", llama.stopped_limit},
        {"stopping_word", llama.stopping_word},
        {"tokens_cached", llama.n_past},
        {"timings", format_timings(llama)},
    };

    if (llama.params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_partial_response(
    llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs
) {
    json res = json{
        {"content", content},
        {"stop", false},
    };

    if (llama.params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json{
        {"tokens", tokens}};
}

static json format_detokenized_response(std::string content)
{
    return json{
        {"content", content}};
}

template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value)
{
    // Fallback null to default value
    return body.contains(key) && !body.at(key).is_null()
               ? body.value(key, default_value)
               : default_value;
}
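// e.g. json_value(body, "top_k", default_params.top_k) returns the request value when
// "top_k" is present and non-null, and the supplied default otherwise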

static void parse_options_completion(const json &body, llama_server_context &llama)
{
    gpt_params default_params;

    llama.stream = json_value(body, "stream", false);
    llama.params.n_predict = json_value(body, "n_predict", default_params.n_predict);
    llama.params.top_k = json_value(body, "top_k", default_params.top_k);
    llama.params.top_p = json_value(body, "top_p", default_params.top_p);
    llama.params.tfs_z = json_value(body, "tfs_z", default_params.tfs_z);
    llama.params.typical_p = json_value(body, "typical_p", default_params.typical_p);
    llama.params.repeat_last_n = json_value(body, "repeat_last_n", default_params.repeat_last_n);
    llama.params.temp = json_value(body, "temperature", default_params.temp);
    llama.params.repeat_penalty = json_value(body, "repeat_penalty", default_params.repeat_penalty);
    llama.params.presence_penalty = json_value(body, "presence_penalty", default_params.presence_penalty);
    llama.params.frequency_penalty = json_value(body, "frequency_penalty", default_params.frequency_penalty);
    llama.params.mirostat = json_value(body, "mirostat", default_params.mirostat);
    llama.params.mirostat_tau = json_value(body, "mirostat_tau", default_params.mirostat_tau);
    llama.params.mirostat_eta = json_value(body, "mirostat_eta", default_params.mirostat_eta);
    llama.params.penalize_nl = json_value(body, "penalize_nl", default_params.penalize_nl);
    llama.params.n_keep = json_value(body, "n_keep", default_params.n_keep);
    llama.params.seed = json_value(body, "seed", default_params.seed);
    llama.params.grammar = json_value(body, "grammar", default_params.grammar);
    llama.params.n_probs = json_value(body, "n_probs", default_params.n_probs);

    if (body.count("prompt") != 0)
    {
        llama.prompt = body["prompt"];
    }
    else
    {
        llama.prompt = "";
    }

    llama.params.logit_bias.clear();
    if (json_value(body, "ignore_eos", false))
    {
        llama.params.logit_bias[llama_token_eos(llama.ctx)] = -INFINITY;
    }
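
    // "logit_bias" is expected as an array of [token_id, bias] pairs;
    // a boolean false bias mutes the token completely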
    const auto &logit_bias = body.find("logit_bias");
    if (logit_bias != body.end() && logit_bias->is_array())
    {
        const int n_vocab = llama_n_vocab(llama.model);
        for (const auto &el : *logit_bias)
        {
            if (el.is_array() && el.size() == 2 && el[0].is_number_integer())
            {
                llama_token tok = el[0].get<llama_token>();
                if (tok >= 0 && tok < n_vocab)
                {
                    if (el[1].is_number())
                    {
                        llama.params.logit_bias[tok] = el[1].get<float>();
                    }
                    else if (el[1].is_boolean() && !el[1].get<bool>())
                    {
                        llama.params.logit_bias[tok] = -INFINITY;
                    }
                }
            }
        }
    }

    llama.params.antiprompt.clear();
    const auto &stop = body.find("stop");
    if (stop != body.end() && stop->is_array())
    {
        for (const auto &word : *stop)
        {
            if (!word.empty())
            {
                llama.params.antiprompt.push_back(word);
            }
        }
    }

    LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
}

static void log_server_request(const Request &req, const Response &res)
{
    LOG_INFO("request", {
        {"remote_addr", req.remote_addr},
        {"remote_port", req.remote_port},
        {"status", res.status},
        {"method", req.method},
        {"path", req.path},
        {"params", req.params},
    });

    LOG_VERBOSE("request", {
        {"request", req.body},
        {"response", res.body},
    });
}
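
// true when the last token of the sequence is the end-of-stream token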
static bool is_at_eob(llama_server_context &server_context, const llama_token *tokens, const size_t n_tokens) {
    return n_tokens && tokens[n_tokens-1] == llama_token_eos(server_context.ctx);
}

// Function matching type llama_beam_search_callback_fn_t.
// Custom callback example is called each time the beams lengths increase:
//  * Show progress by printing ',' followed by the number of convergent beam tokens if any.
//  * When all beams converge to a common prefix, they are made available in beams_state.beams[0].
//    This is also called when the stop condition is met.
//    Collect tokens into std::vector<llama_token> response which is pointed to by callback_data.
static void beam_search_callback(void *callback_data, llama_beams_state beams_state) {
    auto & llama = *static_cast<llama_server_context*>(callback_data);
    // Mark beams as EOS as needed.
    for (size_t i = 0 ; i < beams_state.n_beams ; ++i) {
        llama_beam_view& beam_view = beams_state.beam_views[i];
        if (!beam_view.eob && is_at_eob(llama, beam_view.tokens, beam_view.n_tokens)) {
            beam_view.eob = true;
        }
    }
    printf(",");  // Show progress
    if (const size_t n = beams_state.common_prefix_length) {
        llama.generated_token_probs.resize(llama.generated_token_probs.size() + n);
        assert(0u < beams_state.n_beams);
        const llama_token * tokens = beams_state.beam_views[0].tokens;
        const auto map = [](llama_token tok) { return completion_token_output{{},tok}; };
        std::transform(tokens, tokens + n, llama.generated_token_probs.end() - n, map);
        printf("%zu", n);
    }
    fflush(stdout);
#if 0 // DEBUG: print current beams for this iteration
    std::cout << "\n\nCurrent beams:\n";
    for (size_t i=0 ; i < beams_state.n_beams ; ++i) {
        std::cout << "beams["<<i<<"]: " << ostream_beam_view{state.ctx,beams_state.beam_views[i]} << std::endl;
    }
#endif
}

struct token_translator {
    llama_context * ctx;
    std::string operator()(llama_token tok)                      const { return llama_token_to_piece(ctx, tok); }
    std::string operator()(const completion_token_output & cto)  const { return (*this)(cto.tok); }
};

static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama)
{
    auto & gtps = llama.generated_token_probs;
    auto translator = token_translator{llama.ctx};
    auto add_strlen = [=](size_t sum, const completion_token_output & cto) { return sum + translator(cto).size(); };
    const size_t len = std::accumulate(gtps.begin(), gtps.end(), size_t(0), add_strlen);
    if (llama.generated_text.capacity() < llama.generated_text.size() + len) {
        llama.generated_text.reserve(llama.generated_text.size() + len);
    }
    for (const completion_token_output & cto : gtps) {
        llama.generated_text += translator(cto);
    }
}

int main(int argc, char **argv)
{
    // own arguments required by this example
    gpt_params params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown")
    {
        params.model_alias = params.model;
    }

    llama_backend_init(params.numa);

    LOG_INFO("build info", {{"build", BUILD_NUMBER},
                            {"commit", BUILD_COMMIT}});

    LOG_INFO("system info", {
        {"n_threads", params.n_threads},
        {"n_threads_batch", params.n_threads_batch},
        {"total_threads", std::thread::hardware_concurrency()},
        {"system_info", llama_print_system_info()},
    });

    // load the model
    if (!llama.loadModel(params))
    {
        return 1;
    }

    Server svr;

    svr.set_default_headers({{"Server", "llama.cpp"},
                             {"Access-Control-Allow-Origin", "*"},
                             {"Access-Control-Allow-Headers", "content-type"}});

    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const Request &, Response &res)
    {
        res.set_content(reinterpret_cast<const char*>(&index_html), index_html_len, "text/html");
        return false;
    });

    // this is only called if no index.js is found in the public --path
    svr.Get("/index.js", [](const Request &, Response &res)
    {
        res.set_content(reinterpret_cast<const char *>(&index_js), index_js_len, "text/javascript");
        return false;
    });

    // this is only called if no completion.js is found in the public --path
    svr.Get("/completion.js", [](const Request &, Response &res)
    {
        res.set_content(reinterpret_cast<const char*>(&completion_js), completion_js_len, "application/javascript");
        return false;
    });

    // this is only called if no json-schema-to-grammar.mjs is found in the public --path
    svr.Get("/json-schema-to-grammar.mjs", [](const Request &, Response &res)
    {
        res.set_content(reinterpret_cast<const char*>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript");
        return false;
    });

    svr.Post("/completion", [&llama](const Request &req, Response &res)
    {
        auto lock = llama.lock();

        llama.rewind();

        llama_reset_timings(llama.ctx);

        parse_options_completion(json::parse(req.body), llama);

        if (!llama.loadGrammar())
        {
            res.status = 400;
            return;
        }

        llama.loadPrompt();
        llama.beginCompletion();

        if (!llama.stream) {
            if (llama.params.n_beams) {
                // Fill llama.generated_token_probs vector with final beam.
                llama_beam_search(llama.ctx, beam_search_callback, &llama, llama.params.n_beams,
                                  llama.n_past, llama.n_remain);
                // Translate llama.generated_token_probs to llama.generated_text.
                append_to_generated_text_from_generated_token_probs(llama);
            } else {
                size_t stop_pos = std::string::npos;

                while (llama.has_next_token) {
                    const completion_token_output token_with_probs = llama.doCompletion();
                    const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_piece(llama.ctx, token_with_probs.tok);

                    stop_pos = llama.findStoppingStrings(llama.generated_text,
                                                         token_text.size(), STOP_FULL);
                }

                if (stop_pos == std::string::npos) {
                    stop_pos = llama.findStoppingStrings(llama.generated_text, 0, STOP_PARTIAL);
                }
                if (stop_pos != std::string::npos) {
                    llama.generated_text.erase(llama.generated_text.begin() + stop_pos,
                                               llama.generated_text.end());
                }
            }

            auto probs = llama.generated_token_probs;
            if (llama.params.n_probs > 0 && llama.stopped_word) {
                const std::vector<llama_token> stop_word_toks = llama_tokenize(llama.ctx, llama.stopping_word, false);
                probs = std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.end() - stop_word_toks.size());
            }

            const json data = format_final_response(llama, llama.generated_text, probs);

            llama_print_timings(llama.ctx);

            res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace),
                            "application/json");
        } else {
            const auto chunked_content_provider = [&](size_t, DataSink & sink) {
                size_t sent_count = 0;
                size_t sent_token_probs_index = 0;
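                // results are streamed back as server-sent events:
                // each chunk is a single "data: {...}\n\n" JSON payload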
                while (llama.has_next_token) {
                    const completion_token_output token_with_probs = llama.doCompletion();
                    if (token_with_probs.tok == -1 || llama.multibyte_pending > 0) {
                        continue;
                    }
                    const std::string token_text = llama_token_to_piece(llama.ctx, token_with_probs.tok);

                    size_t pos = std::min(sent_count, llama.generated_text.size());

                    const std::string str_test = llama.generated_text.substr(pos);
                    bool is_stop_full = false;
                    size_t stop_pos =
                        llama.findStoppingStrings(str_test, token_text.size(), STOP_FULL);
                    if (stop_pos != std::string::npos) {
                        is_stop_full = true;
                        llama.generated_text.erase(
                            llama.generated_text.begin() + pos + stop_pos,
                            llama.generated_text.end());
                        pos = std::min(sent_count, llama.generated_text.size());
                    } else {
                        is_stop_full = false;
                        stop_pos = llama.findStoppingStrings(str_test, token_text.size(),
                                                             STOP_PARTIAL);
                    }

                    if (
                        stop_pos == std::string::npos ||
                        // Send rest of the text if we are at the end of the generation
                        (!llama.has_next_token && !is_stop_full && stop_pos > 0)
                    ) {
                        const std::string to_send = llama.generated_text.substr(pos, std::string::npos);

                        sent_count += to_send.size();

                        std::vector<completion_token_output> probs_output = {};

                        if (llama.params.n_probs > 0) {
                            const std::vector<llama_token> to_send_toks = llama_tokenize(llama.ctx, to_send, false);
                            size_t probs_pos = std::min(sent_token_probs_index, llama.generated_token_probs.size());
                            size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama.generated_token_probs.size());
                            if (probs_pos < probs_stop_pos) {
                                probs_output = std::vector<completion_token_output>(llama.generated_token_probs.begin() + probs_pos, llama.generated_token_probs.begin() + probs_stop_pos);
                            }
                            sent_token_probs_index = probs_stop_pos;
                        }

                        const json data = format_partial_response(llama, to_send, probs_output);

                        const std::string str =
                            "data: " +
                            data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";

                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });

                        if (!sink.write(str.data(), str.size())) {
                            LOG_VERBOSE("stream closed", {});
                            llama_print_timings(llama.ctx);
                            return false;
                        }
                    }

                    if (!llama.has_next_token) {
                        // Generation is done, send extra information.
                        const json data = format_final_response(
                            llama,
                            "",
                            std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
                        );

                        const std::string str =
                            "data: " +
                            data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";

                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });

                        if (!sink.write(str.data(), str.size())) {
                            LOG_VERBOSE("stream closed", {});
                            llama_print_timings(llama.ctx);
                            return false;
                        }
                    }
                }

                llama_print_timings(llama.ctx);
                sink.done();
                return true;
            };

            const auto on_complete = [&](bool) {
                llama.mutex.unlock();
            };

            lock.release();

            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    });

    svr.Get("/model.json", [&llama](const Request &, Response &res)
    {
        const json data = format_generation_settings(llama);
        return res.set_content(data.dump(), "application/json");
    });

    svr.Options(R"(/.*)", [](const Request &, Response &res)
    {
        return res.set_content("", "application/json");
    });

    svr.Post("/tokenize", [&llama](const Request &req, Response &res)
    {
        auto lock = llama.lock();

        const json body = json::parse(req.body);
        std::vector<llama_token> tokens;
        if (body.count("content") != 0)
        {
            tokens = llama.tokenize(body["content"], false);
        }
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json");
    });

    svr.Post("/detokenize", [&llama](const Request &req, Response &res)
    {
        auto lock = llama.lock();

        const json body = json::parse(req.body);
        std::string content;
        if (body.count("tokens") != 0)
        {
            const std::vector<llama_token> tokens = body["tokens"];
            content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
        }

        const json data = format_detokenized_response(content);
        return res.set_content(data.dump(), "application/json");
    });

    svr.Post("/embedding", [&llama](const Request &req, Response &res)
    {
        auto lock = llama.lock();

        const json body = json::parse(req.body);

        llama.rewind();
        llama_reset_timings(llama.ctx);

        if (body.count("content") != 0)
        {
            llama.prompt = body["content"];
        }
        else
        {
            llama.prompt = "";
        }
        llama.params.n_predict = 0;
        llama.loadPrompt();
        llama.beginCompletion();
        llama.doCompletion();

        const json data = format_embedding_response(llama);
        return res.set_content(data.dump(), "application/json");
    });

    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const Request &, Response &res, std::exception_ptr ep)
    {
        const char fmt[] = "500 Internal Server Error\n%s";
        char buf[BUFSIZ];
        try {
            std::rethrow_exception(std::move(ep));
        } catch (std::exception & e) {
            snprintf(buf, sizeof(buf), fmt, e.what());
        } catch (...) {
            snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
        }
        res.set_content(buf, "text/plain");
        res.status = 500;
    });

    svr.set_error_handler([](const Request &, Response &res)
    {
        if (res.status == 400) {
            res.set_content("Invalid request", "text/plain");
        } else if (res.status != 500) {
            res.set_content("File Not Found", "text/plain");
            res.status = 404;
        }
    });

    // set timeouts and change hostname and port
    svr.set_read_timeout(sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port))
    {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    // to make it ctrl+clickable:
    printf("\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);

    LOG_INFO("HTTP server listening", {
        {"hostname", sparams.hostname},
        {"port", sparams.port},
    });

    if (!svr.listen_after_bind())
    {
        return 1;
    }

    if (llama.grammar != nullptr) {
        llama_grammar_free(llama.grammar);
    }
    llama_backend_free();

    return 0;
}