  1. #include "common.h"
  2. #include "llama.h"
  3. #include "build-info.h"
  4. #include "grammar-parser.h"
  5. #ifndef NDEBUG
  6. // crash the server in debug mode, otherwise send an http 500 error
  7. #define CPPHTTPLIB_NO_EXCEPTIONS 1
  8. #endif
  9. #include "httplib.h"
  10. #include "json.hpp"
  11. // auto generated files (update with ./deps.sh)
  12. #include "index.html.hpp"
  13. #include "index.js.hpp"
  14. #include "completion.js.hpp"
  15. #include "json-schema-to-grammar.mjs.hpp"
  16. #include <cstddef>
  17. #ifndef SERVER_VERBOSE
  18. #define SERVER_VERBOSE 1
  19. #endif
  20. using namespace httplib;
  21. using json = nlohmann::json;
struct server_params
{
    std::string hostname = "127.0.0.1";
    std::string public_path = "examples/server/public";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
};
// completion token output with probabilities
struct completion_token_output
{
    struct token_prob
    {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
    llama_token tok;
};
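
// Length of the longest common prefix of two token sequences. Illustrative
// example (not from the original source): common_part({1, 2, 3, 4}, {1, 2, 9})
// returns 2. Used below to detect how much of a previous prompt is still cached.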
static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
{
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
    {
    }
    return i;
}
enum stop_type
{
    STOP_FULL,
    STOP_PARTIAL,
};

static bool ends_with(const std::string &str, const std::string &suffix)
{
    return str.size() >= suffix.size() &&
           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}
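
// Returns the byte offset in `text` where a partial match of `stop` begins at
// the very end of `text`, or std::string::npos if there is none. A hedged
// example (assumed, not from the source): with stop = "###" and text ending in
// "##", this returns text.size() - 2, so the caller can hold those bytes back.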
static size_t find_partial_stop_string(const std::string &stop,
                                       const std::string &text)
{
    if (!text.empty() && !stop.empty())
    {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
        {
            if (stop[char_index] == text_last_char)
            {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial))
                {
                    return text.size() - char_index - 1;
                }
            }
        }
    }
    return std::string::npos;
}
template <class Iter>
static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
{
    std::string ret;
    for (; begin != end; ++begin)
    {
        ret += llama_token_to_piece(ctx, *begin);
    }
    return ret;
}
static void server_log(const char *level, const char *function, int line,
                       const char *message, const nlohmann::ordered_json &extra)
{
    nlohmann::ordered_json log{
        {"timestamp", time(nullptr)},
        {"level", level},
        {"function", function},
        {"line", line},
        {"message", message},
    };

    if (!extra.empty())
    {
        log.merge_patch(extra);
    }

    const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
    printf("%.*s\n", (int)str.size(), str.data());
    fflush(stdout);
}
// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
{
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
    // if the size is 1 and the high bit is set, this is a stray byte of a
    // partial multibyte character (size > 1 means it is already a complete piece)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80)
    {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }
    return out;
}
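
// Hedged sketch of the JSON produced below (field names taken from the code,
// values invented for illustration):
// [
//   {"content": "Hello", "probs": [{"tok_str": "Hello", "prob": 0.91},
//                                  {"tok_str": "Hi",    "prob": 0.06}]},
//   ...
// ]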
// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
{
    json out = json::array();
    for (const auto &prob : probs)
    {
        json probs_for_token = json::array();
        for (const auto &p : prob.probs)
        {
            std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json{
                {"tok_str", tok_str},
                {"prob", p.prob},
            });
        }
        std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json{
            {"content", tok_str},
            {"probs", probs_for_token},
        });
    }
    return out;
}
static bool server_verbose = false;

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                            \
    do                                                                   \
    {                                                                    \
        if (server_verbose)                                              \
        {                                                                \
            server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                                \
    } while (0)
#endif

#define LOG_ERROR(MSG, ...) server_log("ERROR", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)
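
// Usage sketch (call shape inferred from the definitions above; message and
// fields are illustrative): the trailing argument is an ordered_json object
// that gets merge-patched into the log record, e.g.
//   LOG_INFO("model loaded", {{"model", params.model}, {"n_ctx", n_ctx}});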
struct llama_server_context
{
    bool stream = false;
    bool has_next_token = false;
    std::string generated_text;
    std::vector<completion_token_output> generated_token_probs;

    size_t num_prompt_tokens = 0;
    size_t num_tokens_predicted = 0;
    size_t n_past = 0;
    size_t n_remain = 0;

    json prompt;
    std::vector<llama_token> embd;
    std::vector<llama_token> last_n_tokens;

    llama_model *model = nullptr;
    llama_context *ctx = nullptr;
    gpt_params params;
    llama_sampling_context ctx_sampling;
    int n_ctx;

    grammar_parser::parse_state parsed_grammar;
    llama_grammar *grammar = nullptr;

    bool truncated = false;
    bool stopped_eos = false;
    bool stopped_word = false;
    bool stopped_limit = false;
    std::string stopping_word;
    int32_t multibyte_pending = 0;

    std::mutex mutex;

    std::unique_lock<std::mutex> lock()
    {
        return std::unique_lock<std::mutex>(mutex);
    }

    ~llama_server_context()
    {
        if (ctx)
        {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model)
        {
            llama_free_model(model);
            model = nullptr;
        }
    }
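
    // Reset the per-request generation state. Note that `embd` and the KV cache
    // are deliberately left alone here: loadPrompt()/loadInfill() below diff the
    // new prompt against `embd` with common_part() so the shared prefix of the
    // cache can be reused across requests.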
    void rewind()
    {
        params.antiprompt.clear();
        params.grammar.clear();
        num_prompt_tokens = 0;
        num_tokens_predicted = 0;
        generated_text = "";
        generated_text.reserve(n_ctx);
        generated_token_probs.clear();
        truncated = false;
        stopped_eos = false;
        stopped_word = false;
        stopped_limit = false;
        stopping_word = "";
        multibyte_pending = 0;
        n_remain = 0;
        n_past = 0;

        if (grammar != nullptr)
        {
            llama_grammar_free(grammar);
            grammar = nullptr;
            ctx_sampling = llama_sampling_context_init(params, NULL);
        }
    }
    bool loadModel(const gpt_params &params_)
    {
        params = params_;
        std::tie(model, ctx) = llama_init_from_gpt_params(params);
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params_.model}});
            return false;
        }
        n_ctx = llama_n_ctx(ctx);
        last_n_tokens.resize(n_ctx);
        std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
        return true;
    }
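
    // The prompt may be a plain string or a mixed array of strings and token
    // ids, concatenated in order. A hedged example of the array form (values
    // are illustrative, not from the source):
    //   "prompt": ["Once upon a time", 29871, "the end"]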
    std::vector<llama_token> tokenize(const json &json_prompt, bool add_bos) const
    {
        // If `add_bos` is true, we only add BOS when json_prompt is a string,
        // or when the first element of the json_prompt array is a string.
        std::vector<llama_token> prompt_tokens;

        if (json_prompt.is_array())
        {
            bool first = true;
            for (const auto &p : json_prompt)
            {
                if (p.is_string())
                {
                    auto s = p.template get<std::string>();
                    // renamed from `p` to avoid shadowing the loop variable
                    std::vector<llama_token> toks;
                    if (first)
                    {
                        toks = ::llama_tokenize(ctx, s, add_bos);
                        first = false;
                    }
                    else
                    {
                        toks = ::llama_tokenize(ctx, s, false);
                    }
                    prompt_tokens.insert(prompt_tokens.end(), toks.begin(), toks.end());
                }
                else
                {
                    if (first)
                    {
                        first = false;
                    }
                    prompt_tokens.push_back(p.template get<llama_token>());
                }
            }
        }
        else
        {
            auto s = json_prompt.template get<std::string>();
            prompt_tokens = ::llama_tokenize(ctx, s, add_bos);
        }

        return prompt_tokens;
    }
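
    // loadGrammar() parses a GBNF grammar from params.grammar. A minimal hedged
    // example of the expected format (illustrative, not shipped with this file):
    //   root ::= answer
    //   answer ::= "yes" | "no"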
    bool loadGrammar()
    {
        if (!params.grammar.empty())
        {
            parsed_grammar = grammar_parser::parse(params.grammar.c_str());
            // will be empty (default) if there are parse errors
            if (parsed_grammar.rules.empty())
            {
                LOG_ERROR("grammar parse error", {{"grammar", params.grammar}});
                return false;
            }
            grammar_parser::print_grammar(stderr, parsed_grammar);

            {
                auto it = params.sampling_params.logit_bias.find(llama_token_eos(ctx));
                if (it != params.sampling_params.logit_bias.end() && it->second == -INFINITY)
                {
                    LOG_WARNING("EOS token is disabled, which will cause most grammars to fail", {});
                }
            }

            std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
            grammar = llama_grammar_init(
                grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
        }
        ctx_sampling = llama_sampling_context_init(params, grammar);
        return true;
    }
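
    // Build a fill-in-the-middle (FIM) prompt. The token layout assembled below
    // is: BOS, <PRE>, prefix tokens, <SUF>, suffix tokens, <MID>; generation
    // then continues from the <MID> marker. (Summary of the code that follows.)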
    void loadInfill()
    {
        bool suff_rm_leading_spc = true;
        if (params.input_suffix.find_first_of(" ") == 0 && params.input_suffix.size() > 1)
        {
            params.input_suffix.erase(0, 1);
            suff_rm_leading_spc = false;
        }

        auto prefix_tokens = tokenize(params.input_prefix, false);
        auto suffix_tokens = tokenize(params.input_suffix, false);
        const int space_token = 29871;
        if (suff_rm_leading_spc && suffix_tokens[0] == space_token)
        {
            suffix_tokens.erase(suffix_tokens.begin());
        }
        prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(ctx));
        prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(ctx)); // always add BOS
        prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(ctx));
        prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
        prefix_tokens.push_back(llama_token_middle(ctx));
        auto prompt_tokens = prefix_tokens;

        num_prompt_tokens = prompt_tokens.size();

        if (params.n_keep < 0)
        {
            params.n_keep = (int)num_prompt_tokens;
        }
        params.n_keep = std::min(params.n_ctx - 4, params.n_keep);

        // if input prompt is too big, truncate like normal
        if (num_prompt_tokens >= (size_t)params.n_ctx)
        {
            printf("Input prompt is too big, truncating. Can only take %d tokens but got %zu\n", params.n_ctx, num_prompt_tokens);
            // todo we probably want to cut from both sides
            const int n_left = (params.n_ctx - params.n_keep) / 2;
            std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
            const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
            new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
            std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), last_n_tokens.begin());

            LOG_VERBOSE("input truncated", {
                {"n_ctx", params.n_ctx},
                {"n_keep", params.n_keep},
                {"n_left", n_left},
                {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
            });

            truncated = true;
            prompt_tokens = new_tokens;
        }
        else
        {
            const size_t ps = num_prompt_tokens;
            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
        }

        // compare the evaluated prompt with the new prompt
        n_past = common_part(embd, prompt_tokens);
        embd = prompt_tokens;
        if (n_past == num_prompt_tokens)
        {
            // we have to evaluate at least 1 token to generate logits.
            printf("we have to evaluate at least 1 token to generate logits\n");
            n_past--;
        }

        LOG_VERBOSE("prompt ingested", {
            {"n_past", n_past},
            {"cached", tokens_to_str(ctx, embd.cbegin(), embd.cbegin() + n_past)},
            {"to_eval", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
        });

        has_next_token = true;
    }
    void loadPrompt()
    {
        auto prompt_tokens = tokenize(prompt, true); // always add BOS

        num_prompt_tokens = prompt_tokens.size();

        if (params.n_keep < 0)
        {
            params.n_keep = (int)num_prompt_tokens;
        }
        params.n_keep = std::min(n_ctx - 4, params.n_keep);

        // if input prompt is too big, truncate like normal
        if (num_prompt_tokens >= (size_t)n_ctx)
        {
            const int n_left = (n_ctx - params.n_keep) / 2;
            std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
            const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
            new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
            std::copy(prompt_tokens.end() - n_ctx, prompt_tokens.end(), last_n_tokens.begin());

            LOG_VERBOSE("input truncated", {
                {"n_ctx", n_ctx},
                {"n_keep", params.n_keep},
                {"n_left", n_left},
                {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
            });

            truncated = true;
            prompt_tokens = new_tokens;
        }
        else
        {
            const size_t ps = num_prompt_tokens;
            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
        }

        // compare the evaluated prompt with the new prompt
        n_past = common_part(embd, prompt_tokens);

        // since #3228 we now have to manually manage the KV cache
        llama_kv_cache_seq_rm(ctx, 0, n_past, -1);

        embd = prompt_tokens;
        if (n_past == num_prompt_tokens)
        {
            // we have to evaluate at least 1 token to generate logits.
            n_past--;
        }

        LOG_VERBOSE("prompt ingested", {
            {"n_past", n_past},
            {"cached", tokens_to_str(ctx, embd.cbegin(), embd.cbegin() + n_past)},
            {"to_eval", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
        });

        has_next_token = true;
    }
    void beginCompletion()
    {
        // remaining token generation budget (params.n_predict == -1 means unlimited)
        n_remain = params.n_predict;
        llama_set_rng_seed(ctx, params.seed);
    }
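
    // When the context fills up, nextToken() below shifts it: the first
    // n_keep + 1 tokens are kept, the next n_discard = (n_past - n_keep - 1) / 2
    // tokens are dropped from the KV cache, and the remainder is slid back by
    // n_discard positions. (Summary of the code that follows.)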
    completion_token_output nextToken()
    {
        completion_token_output result;
        result.tok = -1;

        if (embd.size() >= (size_t)n_ctx)
        {
            // Shift context
            const int n_left    = n_past - params.n_keep - 1;
            const int n_discard = n_left / 2;

            llama_kv_cache_seq_rm   (ctx, 0, params.n_keep + 1, params.n_keep + n_discard + 1);
            llama_kv_cache_seq_shift(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);

            for (size_t i = params.n_keep + 1 + n_discard; i < embd.size(); i++)
            {
                embd[i - n_discard] = embd[i];
            }
            embd.resize(embd.size() - n_discard);

            n_past -= n_discard;

            truncated = true;
            LOG_VERBOSE("input truncated", {
                {"n_ctx", n_ctx},
                {"n_keep", params.n_keep},
                {"n_left", n_left},
            });
        }

        bool tg = true;
        while (n_past < embd.size())
        {
            int n_eval = (int)embd.size() - n_past;
            tg = n_eval == 1;
            if (n_eval > params.n_batch)
            {
                n_eval = params.n_batch;
            }

            if (llama_decode(ctx, llama_batch_get_one(&embd[n_past], n_eval, n_past, 0)))
            {
                LOG_ERROR("failed to eval", {
                    {"n_eval", n_eval},
                    {"n_past", n_past},
                    {"embd", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
                });
                has_next_token = false;
                return result;
            }
            n_past += n_eval;
        }

        if (params.n_predict == 0)
        {
            has_next_token = false;
            result.tok = llama_token_eos(ctx);
            return result;
        }

        {
            // out of user input, sample next token
            std::vector<llama_token_data> candidates;
            candidates.reserve(llama_n_vocab(model));

            result.tok = llama_sampling_sample(ctx, NULL, ctx_sampling, last_n_tokens, candidates);

            llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

            const int32_t n_probs = params.sampling_params.n_probs;
            if (params.sampling_params.temp <= 0 && n_probs > 0)
            {
                // For llama_sample_token_greedy we need to sort candidates
                llama_sample_softmax(ctx, &candidates_p);
            }

            for (size_t i = 0; i < std::min(candidates_p.size, (size_t)n_probs); ++i)
            {
                result.probs.push_back({candidates_p.data[i].id, candidates_p.data[i].p});
            }

            last_n_tokens.erase(last_n_tokens.begin());
            last_n_tokens.push_back(result.tok);
            if (tg)
            {
                num_tokens_predicted++;
            }
        }

        // add it to the context
        embd.push_back(result.tok);
        // decrement remaining sampling budget
        --n_remain;

        if (!embd.empty() && embd.back() == llama_token_eos(ctx))
        {
            // stopping_word = llama_token_to_piece(ctx, embd.back());
            has_next_token = false;
            stopped_eos = true;
            LOG_VERBOSE("eos token found", {});
            return result;
        }

        has_next_token = params.n_predict == -1 || n_remain != 0;
        return result;
    }
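
    // STOP_FULL looks for a complete stop string in the recently generated text
    // and latches the stopped_word state; STOP_PARTIAL only reports where a stop
    // string may have started at the tail of the text, so the streaming code can
    // hold those bytes back. E.g. (illustrative) with stop word "User:" and text
    // ending in "Use", STOP_PARTIAL points at the "Use" suffix.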
    size_t findStoppingStrings(const std::string &text, const size_t last_token_size,
                               const stop_type type)
    {
        size_t stop_pos = std::string::npos;
        for (const std::string &word : params.antiprompt)
        {
            size_t pos;
            if (type == STOP_FULL)
            {
                const size_t tmp = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
                pos = text.find(word, from_pos);
            }
            else
            {
                pos = find_partial_stop_string(word, text);
            }
            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos))
            {
                if (type == STOP_FULL)
                {
                    stopping_word = word;
                    stopped_word = true;
                    has_next_token = false;
                }
                stop_pos = pos;
            }
        }
        return stop_pos;
    }
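
    // Samples one token and appends its piece to generated_text. The
    // multibyte_pending counter tracks how many continuation bytes of an
    // unfinished UTF-8 sequence are still expected, so callers never stream a
    // half-encoded character. E.g. (illustrative) a 3-byte lead byte such as
    // 0xE2 sets multibyte_pending = 2 until both continuation bytes arrive.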
    completion_token_output doCompletion()
    {
        auto token_with_probs = nextToken();

        const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_piece(ctx, token_with_probs.tok);
        generated_text += token_text;

        if (params.sampling_params.n_probs > 0)
        {
            generated_token_probs.push_back(token_with_probs);
        }

        if (multibyte_pending > 0)
        {
            multibyte_pending -= token_text.size();
        }
        else if (token_text.size() == 1)
        {
            const char c = token_text[0];
            // 2-byte characters: 110xxxxx 10xxxxxx
            if ((c & 0xE0) == 0xC0)
            {
                multibyte_pending = 1;
            }
            // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
            else if ((c & 0xF0) == 0xE0)
            {
                multibyte_pending = 2;
            }
            // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
            else if ((c & 0xF8) == 0xF0)
            {
                multibyte_pending = 3;
            }
            else
            {
                multibyte_pending = 0;
            }
        }

        if (multibyte_pending > 0 && !has_next_token)
        {
            has_next_token = true;
            n_remain++;
        }

        if (!has_next_token && n_remain == 0)
        {
            stopped_limit = true;
        }

        LOG_VERBOSE("next token", {
            {"token", token_with_probs.tok},
            {"token_text", tokens_to_output_formatted_string(ctx, token_with_probs.tok)},
            {"has_next_token", has_next_token},
            {"n_remain", n_remain},
            {"num_tokens_predicted", num_tokens_predicted},
            {"stopped_eos", stopped_eos},
            {"stopped_word", stopped_word},
            {"stopped_limit", stopped_limit},
            {"stopping_word", stopping_word},
        });

        return token_with_probs;
    }
    std::vector<float> getEmbedding()
    {
        static const int n_embd = llama_n_embd(model);
        if (!params.embedding)
        {
            LOG_WARNING("embedding disabled", {
                {"params.embedding", params.embedding},
            });
            return std::vector<float>(n_embd, 0.0f);
        }
        const float *data = llama_get_embeddings(ctx);
        std::vector<float> embedding(data, data + n_embd);
        return embedding;
    }
};
static void server_print_usage(const char *argv0, const gpt_params &params,
                               const server_params &sparams)
{
    printf("usage: %s [options]\n", argv0);
    printf("\n");
    printf("options:\n");
    printf("  -h, --help                show this help message and exit\n");
    printf("  -v, --verbose             verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    printf("  -t N, --threads N         number of threads to use during computation (default: %d)\n", params.n_threads);
    printf("  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
    printf("  --rope-freq-base N        RoPE base frequency (default: loaded from model)\n");
    printf("  --rope-freq-scale N       RoPE frequency scaling factor (default: loaded from model)\n");
    printf("  -b N, --batch-size N      batch size for prompt processing (default: %d)\n", params.n_batch);
    printf("  --memory-f32              use f32 instead of f16 for memory key+value (default: disabled)\n");
    printf("                            not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_mlock_supported())
    {
        printf("  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_mmap_supported())
    {
        printf("  --no-mmap                 do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    printf("  --numa                    attempt optimizations that help on some NUMA systems\n");
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
    printf("  -ngl N, --n-gpu-layers N\n");
    printf("                            number of layers to store in VRAM\n");
    printf("  -ts SPLIT, --tensor-split SPLIT\n");
    printf("                            how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    printf("  -mg i, --main-gpu i       the GPU to use for scratch and small tensors\n");
    printf("  -nommq, --no-mul-mat-q\n");
    printf("                            use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
    printf("                            Not recommended since this is both slower and uses more VRAM.\n");
#endif
    printf("  -m FNAME, --model FNAME\n");
    printf("                            model path (default: %s)\n", params.model.c_str());
    printf("  -a ALIAS, --alias ALIAS\n");
    printf("                            set an alias for the model, will be added as `model` field in completion response\n");
    printf("  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
    printf("  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
    printf("  --host HOST               ip address to listen on (default: %s)\n", sparams.hostname.c_str());
    printf("  --port PORT               port to listen on (default: %d)\n", sparams.port);
    printf("  --path PUBLIC_PATH        path from which to serve static files (default: %s)\n", sparams.public_path.c_str());
    printf("  -to N, --timeout N        server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    printf("  --embedding               enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    printf("\n");
}
static void server_params_parse(int argc, char **argv, server_params &sparams,
                                gpt_params &params)
{
    gpt_params default_params;
    server_params default_sparams;
    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++)
    {
        arg = argv[i];
        if (arg == "--port")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        }
        else if (arg == "--host")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        }
        else if (arg == "--path")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.public_path = argv[i];
        }
        else if (arg == "--timeout" || arg == "-to")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.read_timeout = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        }
        else if (arg == "-m" || arg == "--model")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        }
        else if (arg == "-a" || arg == "--alias")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        }
        else if (arg == "-h" || arg == "--help")
        {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        }
        else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        }
        else if (arg == "--rope-freq-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_base = std::stof(argv[i]);
        }
        else if (arg == "--rope-freq-scale")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = std::stof(argv[i]);
        }
        else if (arg == "--memory-f32" || arg == "--memory_f32")
        {
            params.memory_f16 = false;
        }
        else if (arg == "--threads" || arg == "-t")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        }
        else if (arg == "-b" || arg == "--batch-size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        }
        else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
            params.n_gpu_layers = std::stoi(argv[i]);
#else
            LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                        "See main README.md for information on enabling GPU BLAS support",
                        {{"n_gpu_layers", params.n_gpu_layers}});
#endif
        }
        else if (arg == "--tensor-split" || arg == "-ts")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

            for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device)
            {
                if (i_device < split_arg.size())
                {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else
                {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--no-mul-mat-q" || arg == "-nommq")
        {
#ifdef GGML_USE_CUBLAS
            params.mul_mat_q = false;
#else
            LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--main-gpu" || arg == "-mg")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
#endif
        }
        else if (arg == "--lora")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.push_back(std::make_tuple(argv[i], 1.0f));
            params.use_mmap = false;
        }
        else if (arg == "--lora-scaled")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            const char *lora_adapter = argv[i];
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.push_back(std::make_tuple(lora_adapter, std::stof(argv[i])));
            params.use_mmap = false;
        }
        else if (arg == "--lora-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        }
        else if (arg == "-v" || arg == "--verbose")
        {
#if SERVER_VERBOSE != 1
            LOG_WARNING("server.cpp is not built with verbose logging.", {});
#else
            server_verbose = true;
#endif
        }
        else if (arg == "--mlock")
        {
            params.use_mlock = true;
        }
        else if (arg == "--no-mmap")
        {
            params.use_mmap = false;
        }
        else if (arg == "--numa")
        {
            params.numa = true;
        }
        else if (arg == "--embedding")
        {
            params.embedding = true;
        }
        else
        {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (invalid_param)
    {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}
static json format_generation_settings(llama_server_context &llama)
{
    const auto &sparams = llama.params.sampling_params;
    const auto eos_bias = sparams.logit_bias.find(llama_token_eos(llama.ctx));
    const bool ignore_eos = eos_bias != sparams.logit_bias.end() &&
                            eos_bias->second < 0.0f && std::isinf(eos_bias->second);
    return json{
        {"n_ctx", llama.n_ctx},
        {"model", llama.params.model_alias},
        {"seed", llama.params.seed},
        {"temp", sparams.temp},
        {"top_k", sparams.top_k},
        {"top_p", sparams.top_p},
        {"tfs_z", sparams.tfs_z},
        {"typical_p", sparams.typical_p},
        {"repeat_last_n", sparams.repeat_last_n},
        {"repeat_penalty", sparams.repeat_penalty},
        {"presence_penalty", sparams.presence_penalty},
        {"frequency_penalty", sparams.frequency_penalty},
        {"mirostat", sparams.mirostat},
        {"mirostat_tau", sparams.mirostat_tau},
        {"mirostat_eta", sparams.mirostat_eta},
        {"penalize_nl", sparams.penalize_nl},
        {"stop", llama.params.antiprompt},
        {"n_predict", llama.params.n_predict},
        {"n_keep", llama.params.n_keep},
        {"ignore_eos", ignore_eos},
        {"stream", llama.stream},
        {"logit_bias", sparams.logit_bias},
        {"n_probs", sparams.n_probs},
        {"grammar", llama.params.grammar},
    };
}
static json format_embedding_response(llama_server_context &llama)
{
    return json{
        {"embedding", llama.getEmbedding()},
    };
}

static json format_timings(llama_server_context &llama)
{
    const auto timings = llama_get_timings(llama.ctx);
    return json{
        {"prompt_n", timings.n_p_eval},
        {"prompt_ms", timings.t_p_eval_ms},
        {"prompt_per_token_ms", timings.t_p_eval_ms / timings.n_p_eval},
        {"prompt_per_second", 1e3 / timings.t_p_eval_ms * timings.n_p_eval},
        {"predicted_n", timings.n_eval},
        {"predicted_ms", timings.t_eval_ms},
        {"predicted_per_token_ms", timings.t_eval_ms / timings.n_eval},
        {"predicted_per_second", 1e3 / timings.t_eval_ms * timings.n_eval},
    };
}
static json format_final_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", true},
        {"model", llama.params.model_alias},
        {"tokens_predicted", llama.num_tokens_predicted},
        {"tokens_evaluated", llama.num_prompt_tokens},
        {"generation_settings", format_generation_settings(llama)},
        {"prompt", llama.prompt},
        {"truncated", llama.truncated},
        {"stopped_eos", llama.stopped_eos},
        {"stopped_word", llama.stopped_word},
        {"stopped_limit", llama.stopped_limit},
        {"stopping_word", llama.stopping_word},
        {"tokens_cached", llama.n_past},
        {"timings", format_timings(llama)},
    };

    if (llama.params.sampling_params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}
static json format_partial_response(
    llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", false},
    };

    if (llama.params.sampling_params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json{
        {"tokens", tokens}};
}

static json format_detokenized_response(std::string content)
{
    return json{
        {"content", content}};
}
template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value)
{
    // Fallback null to default value
    return body.contains(key) && !body.at(key).is_null()
               ? body.value(key, default_value)
               : default_value;
}
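
// Hedged sketch of a /completion request body accepted by the parser below
// (keys are the ones read by the code; the values are invented for illustration):
// {
//   "prompt": "Building a website can be done in 10 simple steps:",
//   "temperature": 0.8, "top_k": 40, "n_predict": 128,
//   "stop": ["\n### "],
//   "logit_bias": [[15043, -1.0], [2, false]]   // [token, bias]; false => -inf
// }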
static void parse_options_completion(const json &body, llama_server_context &llama)
{
    gpt_params default_params;
    const auto &default_sparams = default_params.sampling_params;
    auto &sparams = llama.params.sampling_params;

    llama.stream = json_value(body, "stream", false);
    llama.params.n_predict = json_value(body, "n_predict", default_params.n_predict);
    sparams.top_k = json_value(body, "top_k", default_sparams.top_k);
    sparams.top_p = json_value(body, "top_p", default_sparams.top_p);
    sparams.tfs_z = json_value(body, "tfs_z", default_sparams.tfs_z);
    sparams.typical_p = json_value(body, "typical_p", default_sparams.typical_p);
    sparams.repeat_last_n = json_value(body, "repeat_last_n", default_sparams.repeat_last_n);
    sparams.temp = json_value(body, "temperature", default_sparams.temp);
    sparams.repeat_penalty = json_value(body, "repeat_penalty", default_sparams.repeat_penalty);
    sparams.presence_penalty = json_value(body, "presence_penalty", default_sparams.presence_penalty);
    sparams.frequency_penalty = json_value(body, "frequency_penalty", default_sparams.frequency_penalty);
    sparams.mirostat = json_value(body, "mirostat", default_sparams.mirostat);
    sparams.mirostat_tau = json_value(body, "mirostat_tau", default_sparams.mirostat_tau);
    sparams.mirostat_eta = json_value(body, "mirostat_eta", default_sparams.mirostat_eta);
    sparams.penalize_nl = json_value(body, "penalize_nl", default_sparams.penalize_nl);
    llama.params.n_keep = json_value(body, "n_keep", default_params.n_keep);
    llama.params.seed = json_value(body, "seed", default_params.seed);
    llama.params.grammar = json_value(body, "grammar", default_params.grammar);
    sparams.n_probs = json_value(body, "n_probs", default_sparams.n_probs);

    if (body.count("prompt") != 0)
    {
        llama.prompt = body["prompt"];
    }
    else
    {
        llama.prompt = "";
    }

    sparams.logit_bias.clear();
    if (json_value(body, "ignore_eos", false))
    {
        sparams.logit_bias[llama_token_eos(llama.ctx)] = -INFINITY;
    }

    const auto &logit_bias = body.find("logit_bias");
    if (logit_bias != body.end() && logit_bias->is_array())
    {
        const int n_vocab = llama_n_vocab(llama.model);
        for (const auto &el : *logit_bias)
        {
            if (el.is_array() && el.size() == 2 && el[0].is_number_integer())
            {
                llama_token tok = el[0].get<llama_token>();
                if (tok >= 0 && tok < n_vocab)
                {
                    if (el[1].is_number())
                    {
                        sparams.logit_bias[tok] = el[1].get<float>();
                    }
                    else if (el[1].is_boolean() && !el[1].get<bool>())
                    {
                        sparams.logit_bias[tok] = -INFINITY;
                    }
                }
            }
        }
    }

    llama.params.antiprompt.clear();
    const auto &stop = body.find("stop");
    if (stop != body.end() && stop->is_array())
    {
        for (const auto &word : *stop)
        {
            if (!word.empty())
            {
                llama.params.antiprompt.push_back(word);
            }
        }
    }

    llama.ctx_sampling = llama_sampling_context_init(llama.params, llama.grammar);

    LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
}
static void parse_options_infill(const json &body, llama_server_context &llama)
{
    if (body.count("input_prefix") != 0)
    {
        llama.params.input_prefix = body["input_prefix"];
    }
    else
    {
        llama.params.input_prefix = "";
    }
    if (body.count("input_suffix") != 0)
    {
        llama.params.input_suffix = body["input_suffix"];
    }
    else
    {
        llama.params.input_suffix = "";
    }
    parse_options_completion(body, llama);
}
static void log_server_request(const Request &req, const Response &res)
{
    LOG_INFO("request", {
        {"remote_addr", req.remote_addr},
        {"remote_port", req.remote_port},
        {"status", res.status},
        {"method", req.method},
        {"path", req.path},
        {"params", req.params},
    });

    LOG_VERBOSE("request", {
        {"request", req.body},
        {"response", res.body},
    });
}
static bool is_at_eob(llama_server_context &server_context, const llama_token *tokens, const size_t n_tokens)
{
    return n_tokens && tokens[n_tokens - 1] == llama_token_eos(server_context.ctx);
}

// Function matching type llama_beam_search_callback_fn_t.
// Custom callback example is called each time the beam lengths increase:
//  * Show progress by printing ',' followed by the number of convergent beam tokens if any.
//  * When all beams converge to a common prefix, they are made available in beams_state.beams[0].
//    This is also called when the stop condition is met.
//    Collect tokens into std::vector<llama_token> response which is pointed to by callback_data.
static void beam_search_callback(void *callback_data, llama_beams_state beams_state)
{
    auto &llama = *static_cast<llama_server_context *>(callback_data);
    // Mark beams as EOS as needed.
    for (size_t i = 0; i < beams_state.n_beams; ++i)
    {
        llama_beam_view &beam_view = beams_state.beam_views[i];
        if (!beam_view.eob && is_at_eob(llama, beam_view.tokens, beam_view.n_tokens))
        {
            beam_view.eob = true;
        }
    }
    printf(","); // Show progress
    if (const size_t n = beams_state.common_prefix_length)
    {
        llama.generated_token_probs.resize(llama.generated_token_probs.size() + n);
        assert(0u < beams_state.n_beams);
        const llama_token *tokens = beams_state.beam_views[0].tokens;
        const auto map = [](llama_token tok) { return completion_token_output{{}, tok}; };
        std::transform(tokens, tokens + n, llama.generated_token_probs.end() - n, map);
        printf("%zu", n);
    }
    fflush(stdout);
#if 0 // DEBUG: print current beams for this iteration
    std::cout << "\n\nCurrent beams:\n";
    for (size_t i = 0; i < beams_state.n_beams; ++i)
    {
        // note: fixed to use llama.ctx (the original referenced an undeclared `state`)
        std::cout << "beams[" << i << "]: " << ostream_beam_view{llama.ctx, beams_state.beam_views[i]} << std::endl;
    }
#endif
}
struct token_translator
{
    llama_context *ctx;
    std::string operator()(llama_token tok) const { return llama_token_to_piece(ctx, tok); }
    std::string operator()(const completion_token_output &cto) const { return (*this)(cto.tok); }
};

static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama)
{
    auto &gtps = llama.generated_token_probs;
    auto translator = token_translator{llama.ctx};
    auto add_strlen = [=](size_t sum, const completion_token_output &cto) { return sum + translator(cto).size(); };
    const size_t len = std::accumulate(gtps.begin(), gtps.end(), size_t(0), add_strlen);
    if (llama.generated_text.capacity() < llama.generated_text.size() + len)
    {
        llama.generated_text.reserve(llama.generated_text.size() + len);
    }
    for (const completion_token_output &cto : gtps)
    {
        llama.generated_text += translator(cto);
    }
}
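
// Hedged usage sketch (model path and values are illustrative, not defaults):
//   ./server -m models/llama-2-7b.Q4_K_M.gguf -c 2048 --host 0.0.0.0 --port 8080
// then POST a JSON body like {"prompt": "...", "n_predict": 64} to /completion.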
int main(int argc, char **argv)
{
    // own arguments required by this example
    gpt_params params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown")
    {
        params.model_alias = params.model;
    }

    llama_backend_init(params.numa);

    LOG_INFO("build info", {{"build", BUILD_NUMBER},
                            {"commit", BUILD_COMMIT}});
    LOG_INFO("system info", {
        {"n_threads", params.n_threads},
        {"n_threads_batch", params.n_threads_batch},
        {"total_threads", std::thread::hardware_concurrency()},
        {"system_info", llama_print_system_info()},
    });

    // load the model
    if (!llama.loadModel(params))
    {
        return 1;
    }

    Server svr;

    svr.set_default_headers({{"Server", "llama.cpp"},
                             {"Access-Control-Allow-Origin", "*"},
                             {"Access-Control-Allow-Headers", "content-type"}});

    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const Request &, Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&index_html), index_html_len, "text/html");
                return false;
            });

    // this is only called if no index.js is found in the public --path
    svr.Get("/index.js", [](const Request &, Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&index_js), index_js_len, "text/javascript");
                return false;
            });

    // this is only called if no completion.js is found in the public --path
    svr.Get("/completion.js", [](const Request &, Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&completion_js), completion_js_len, "application/javascript");
                return false;
            });

    // this is only called if no json-schema-to-grammar.mjs is found in the public --path
    svr.Get("/json-schema-to-grammar.mjs", [](const Request &, Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript");
                return false;
            });
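
    // Streaming responses below use server-sent events: each chunk is written as
    // "data: {json}\n\n", and the final event carries "stop": true plus timing
    // and stop-reason details (see format_final_response above).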
  1246. svr.Post("/completion", [&llama](const Request &req, Response &res)
  1247. {
  1248. auto lock = llama.lock();
  1249. llama.rewind();
  1250. llama_reset_timings(llama.ctx);
  1251. parse_options_completion(json::parse(req.body), llama);
  1252. if (!llama.loadGrammar())
  1253. {
  1254. res.status = 400;
  1255. return;
  1256. }
  1257. llama.loadPrompt();
  1258. llama.beginCompletion();
  1259. if (!llama.stream) {
  1260. if (llama.params.n_beams) {
  1261. // Fill llama.generated_token_probs vector with final beam.
  1262. llama_beam_search(llama.ctx, beam_search_callback, &llama, llama.params.n_beams,
  1263. llama.n_past, llama.n_remain);
  1264. // Translate llama.generated_token_probs to llama.generated_text.
  1265. append_to_generated_text_from_generated_token_probs(llama);
  1266. } else {
  1267. size_t stop_pos = std::string::npos;
  1268. while (llama.has_next_token) {
  1269. const completion_token_output token_with_probs = llama.doCompletion();
  1270. const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_piece(llama.ctx, token_with_probs.tok);
  1271. stop_pos = llama.findStoppingStrings(llama.generated_text,
  1272. token_text.size(), STOP_FULL);
  1273. }
  1274. if (stop_pos == std::string::npos) {
  1275. stop_pos = llama.findStoppingStrings(llama.generated_text, 0, STOP_PARTIAL);
  1276. }
  1277. if (stop_pos != std::string::npos) {
  1278. llama.generated_text.erase(llama.generated_text.begin() + stop_pos,
  1279. llama.generated_text.end());
  1280. }
  1281. }
  1282. auto probs = llama.generated_token_probs;
  1283. if (llama.params.sampling_params.n_probs > 0 && llama.stopped_word) {
  1284. const std::vector<llama_token> stop_word_toks = llama_tokenize(llama.ctx, llama.stopping_word, false);
  1285. probs = std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.end() - stop_word_toks.size());
  1286. }
  1287. const json data = format_final_response(llama, llama.generated_text, probs);
  1288. llama_print_timings(llama.ctx);
  1289. res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace),
  1290. "application/json");
  1291. } else {
            // Stream partial results back as server-sent events; each chunk is
            // a "data: {...}\n\n" frame, as expected by EventSource clients.
            const auto chunked_content_provider = [&](size_t, DataSink & sink) {
                size_t sent_count = 0;
                size_t sent_token_probs_index = 0;

                while (llama.has_next_token) {
                    const completion_token_output token_with_probs = llama.doCompletion();
                    if (token_with_probs.tok == -1 || llama.multibyte_pending > 0) {
                        continue;
                    }
                    const std::string token_text = llama_token_to_piece(llama.ctx, token_with_probs.tok);

                    size_t pos = std::min(sent_count, llama.generated_text.size());

                    // Scan the not-yet-sent tail of the generated text for stop strings.
                    // A full match trims the generated text; a partial match at the tail
                    // means the text is held back until more tokens resolve the ambiguity.
                    const std::string str_test = llama.generated_text.substr(pos);
                    bool is_stop_full = false;
                    size_t stop_pos =
                        llama.findStoppingStrings(str_test, token_text.size(), STOP_FULL);
                    if (stop_pos != std::string::npos) {
                        is_stop_full = true;
                        llama.generated_text.erase(
                            llama.generated_text.begin() + pos + stop_pos,
                            llama.generated_text.end());
                        pos = std::min(sent_count, llama.generated_text.size());
                    } else {
                        is_stop_full = false;
                        stop_pos = llama.findStoppingStrings(str_test, token_text.size(),
                                                             STOP_PARTIAL);
                    }

                    if (
                        stop_pos == std::string::npos ||
                        // Send rest of the text if we are at the end of the generation
                        (!llama.has_next_token && !is_stop_full && stop_pos > 0)
                    ) {
                        const std::string to_send = llama.generated_text.substr(pos, std::string::npos);

                        sent_count += to_send.size();

                        std::vector<completion_token_output> probs_output = {};

                        if (llama.params.sampling_params.n_probs > 0) {
                            const std::vector<llama_token> to_send_toks = llama_tokenize(llama.ctx, to_send, false);
                            size_t probs_pos      = std::min(sent_token_probs_index,                       llama.generated_token_probs.size());
                            size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama.generated_token_probs.size());
                            if (probs_pos < probs_stop_pos) {
                                probs_output = std::vector<completion_token_output>(llama.generated_token_probs.begin() + probs_pos, llama.generated_token_probs.begin() + probs_stop_pos);
                            }
                            sent_token_probs_index = probs_stop_pos;
                        }

                        const json data = format_partial_response(llama, to_send, probs_output);

                        const std::string str =
                            "data: " +
                            data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";

                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });

                        if (!sink.write(str.data(), str.size())) {
                            LOG_VERBOSE("stream closed", {});
                            llama_print_timings(llama.ctx);
                            return false;
                        }
                    }

                    if (!llama.has_next_token) {
                        // Generation is done, send extra information.
                        const json data = format_final_response(
                            llama,
                            "",
                            std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
                        );

                        const std::string str =
                            "data: " +
                            data.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";

                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });

                        if (!sink.write(str.data(), str.size())) {
                            LOG_VERBOSE("stream closed", {});
                            llama_print_timings(llama.ctx);
                            return false;
                        }
                    }
                }

                llama_print_timings(llama.ctx);
                sink.done();
                return true;
            };
            const auto on_complete = [&](bool) {
                llama.mutex.unlock();
            };
            // Hand ownership of the mutex over to on_complete: the response
            // outlives this handler, so the lock must stay held until the
            // stream finishes or the client disconnects.
            lock.release();
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        } });
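
    // Fill-in-the-middle endpoint: the handler below mirrors the streaming
    // provider above, but builds its prompt with llama.loadInfill() so the
    // model completes the gap between a given prefix and suffix.
    //
    // A minimal example request (the input_prefix/input_suffix field names are
    // assumed from parse_options_infill; host and port follow --host/--port):
    //
    //   curl -s http://127.0.0.1:8080/infill \
    //        -d '{"input_prefix": "def add(a, b):\n    c = ", "input_suffix": "\n    return c\n"}'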
    svr.Post("/infill", [&llama](const Request &req, Response &res)
            {
        auto lock = llama.lock();

        llama.rewind();

        llama_reset_timings(llama.ctx);

        parse_options_infill(json::parse(req.body), llama);

        if (!llama.loadGrammar())
        {
            res.status = 400;
            return;
        }
        llama.loadInfill();
        llama.beginCompletion();
        const auto chunked_content_provider = [&](size_t, DataSink & sink) {
            size_t sent_count = 0;
            size_t sent_token_probs_index = 0;

            while (llama.has_next_token) {
                const completion_token_output token_with_probs = llama.doCompletion();
                if (token_with_probs.tok == -1 || llama.multibyte_pending > 0) {
                    continue;
                }
                const std::string token_text = llama_token_to_piece(llama.ctx, token_with_probs.tok);

                size_t pos = std::min(sent_count, llama.generated_text.size());

                const std::string str_test = llama.generated_text.substr(pos);
                bool is_stop_full = false;
                size_t stop_pos =
                    llama.findStoppingStrings(str_test, token_text.size(), STOP_FULL);
                if (stop_pos != std::string::npos) {
                    is_stop_full = true;
                    llama.generated_text.erase(
                        llama.generated_text.begin() + pos + stop_pos,
                        llama.generated_text.end());
                    pos = std::min(sent_count, llama.generated_text.size());
                } else {
                    is_stop_full = false;
                    stop_pos = llama.findStoppingStrings(str_test, token_text.size(),
                                                         STOP_PARTIAL);
                }

                if (
                    stop_pos == std::string::npos ||
                    // Send rest of the text if we are at the end of the generation
                    (!llama.has_next_token && !is_stop_full && stop_pos > 0)
                ) {
                    const std::string to_send = llama.generated_text.substr(pos, std::string::npos);

                    sent_count += to_send.size();

                    std::vector<completion_token_output> probs_output = {};

                    if (llama.params.sampling_params.n_probs > 0) {
                        const std::vector<llama_token> to_send_toks = llama_tokenize(llama.ctx, to_send, false);
                        size_t probs_pos      = std::min(sent_token_probs_index,                       llama.generated_token_probs.size());
                        size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama.generated_token_probs.size());
                        if (probs_pos < probs_stop_pos) {
                            probs_output = std::vector<completion_token_output>(llama.generated_token_probs.begin() + probs_pos, llama.generated_token_probs.begin() + probs_stop_pos);
                        }
                        sent_token_probs_index = probs_stop_pos;
                    }

                    const json data = format_partial_response(llama, to_send, probs_output);

                    const std::string str =
                        "data: " +
                        data.dump(-1, ' ', false, json::error_handler_t::replace) +
                        "\n\n";

                    LOG_VERBOSE("data stream", {
                        { "to_send", str }
                    });

                    if (!sink.write(str.data(), str.size())) {
                        LOG_VERBOSE("stream closed", {});
                        llama_print_timings(llama.ctx);
                        return false;
                    }
                }

                if (!llama.has_next_token) {
                    // Generation is done, send extra information.
                    const json data = format_final_response(
                        llama,
                        "",
                        std::vector<completion_token_output>(llama.generated_token_probs.begin(), llama.generated_token_probs.begin() + sent_token_probs_index)
                    );

                    const std::string str =
                        "data: " +
                        data.dump(-1, ' ', false, json::error_handler_t::replace) +
                        "\n\n";

                    LOG_VERBOSE("data stream", {
                        { "to_send", str }
                    });

                    if (!sink.write(str.data(), str.size())) {
                        LOG_VERBOSE("stream closed", {});
                        llama_print_timings(llama.ctx);
                        return false;
                    }
                }
            }

            llama_print_timings(llama.ctx);
            sink.done();
            return true;
        };
        const auto on_complete = [&](bool) {
            llama.mutex.unlock();
        };
        lock.release();
        res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
    });
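
    // Expose the server's current generation settings as JSON, e.g. for a web
    // UI that wants to mirror the active defaults.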
    svr.Get("/model.json", [&llama](const Request &, Response &res)
            {
        const json data = format_generation_settings(llama);
        return res.set_content(data.dump(), "application/json"); });
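
    // Answer OPTIONS requests on any path with an empty 200 so that browser
    // preflight requests (typically CORS) succeed.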
    svr.Options(R"(/.*)", [](const Request &, Response &res)
                { return res.set_content("", "application/json"); });
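
    // Tokenize a string with the loaded model's vocabulary. A sketch of a
    // request (host/port follow --host/--port; the response shape is whatever
    // format_tokenizer_response produces):
    //
    //   curl -s http://127.0.0.1:8080/tokenize -d '{"content": "Hello, world!"}'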
    svr.Post("/tokenize", [&llama](const Request &req, Response &res)
             {
        auto lock = llama.lock();

        const json body = json::parse(req.body);
        std::vector<llama_token> tokens;
        if (body.count("content") != 0)
        {
            tokens = llama.tokenize(body["content"], false);
        }
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json"); });
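
    // Inverse of /tokenize: turn a list of token ids back into text. The ids
    // in this sketch are placeholders; valid values depend on the loaded model:
    //
    //   curl -s http://127.0.0.1:8080/detokenize -d '{"tokens": [15043, 3186]}'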
    svr.Post("/detokenize", [&llama](const Request &req, Response &res)
             {
        auto lock = llama.lock();

        const json body = json::parse(req.body);
        std::string content;
        if (body.count("tokens") != 0)
        {
            const std::vector<llama_token> tokens = body["tokens"];
            content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
        }

        const json data = format_detokenized_response(content);
        return res.set_content(data.dump(), "application/json"); });
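
    // Return an embedding for the supplied content. n_predict is forced to 0
    // below, so the prompt is evaluated once and no new tokens are sampled:
    //
    //   curl -s http://127.0.0.1:8080/embedding -d '{"content": "Hello"}'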
    svr.Post("/embedding", [&llama](const Request &req, Response &res)
             {
        auto lock = llama.lock();

        const json body = json::parse(req.body);

        llama.rewind();

        llama_reset_timings(llama.ctx);

        if (body.count("content") != 0)
        {
            llama.prompt = body["content"];
        }
        else
        {
            llama.prompt = "";
        }
        llama.params.n_predict = 0;
        llama.loadPrompt();
        llama.beginCompletion();
        llama.doCompletion();

        const json data = format_embedding_response(llama);
        return res.set_content(data.dump(), "application/json"); });
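
    // Request logging plus a catch-all exception handler: anything thrown out
    // of a route is reported to the client as a plain-text 500 rather than
    // silently dropping the connection.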
    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const Request &, Response &res, std::exception_ptr ep)
            {
        const char fmt[] = "500 Internal Server Error\n%s";
        char buf[BUFSIZ];
        try {
            std::rethrow_exception(std::move(ep));
        } catch (std::exception & e) {
            snprintf(buf, sizeof(buf), fmt, e.what());
        } catch (...) {
            snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
        }
        res.set_content(buf, "text/plain");
        res.status = 500; });
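
    // Status fix-ups for responses that reach the client without a body: keep
    // 400 and 500 as-is, map everything else to a plain-text 404.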
    svr.set_error_handler([](const Request &, Response &res)
            {
        if (res.status == 400) {
            res.set_content("Invalid request", "text/plain");
        } else if (res.status != 500) {
            res.set_content("File Not Found", "text/plain");
            res.status = 404;
        } });
    // set timeouts and change hostname and port
    svr.set_read_timeout (sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port))
    {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    // to make it ctrl+clickable:
    printf("\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);

    LOG_INFO("HTTP server listening", {
        {"hostname", sparams.hostname},
        {"port", sparams.port},
    });

    if (!svr.listen_after_bind())
    {
        return 1;
    }

    if (llama.grammar != nullptr) {
        llama_grammar_free(llama.grammar);
    }

    llama_backend_free();

    return 0;
}