server.cpp
#include "common.h"
#include "llama.h"
#include "build-info.h"
#include "grammar-parser.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif

#include "httplib.h"
#include "json.hpp"

// auto generated files (update with ./deps.sh)
#include "index.html.hpp"
#include "index.js.hpp"
#include "completion.js.hpp"

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

using namespace httplib;
using json = nlohmann::json;
struct server_params
{
    std::string hostname = "127.0.0.1";
    std::string public_path = "examples/server/public";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
};

// completion token output with probabilities
struct completion_token_output
{
    struct token_prob
    {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
    llama_token tok;
};
static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
{
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
    {
    }
    return i;
}

enum stop_type
{
    STOP_FULL,
    STOP_PARTIAL,
};

static bool ends_with(const std::string &str, const std::string &suffix)
{
    return str.size() >= suffix.size() &&
           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}
static size_t find_partial_stop_string(const std::string &stop,
                                       const std::string &text)
{
    if (!text.empty() && !stop.empty())
    {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
        {
            if (stop[char_index] == text_last_char)
            {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial))
                {
                    return text.size() - char_index - 1;
                }
            }
        }
    }
    return std::string::npos;
}
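// For example, with stop = "###" and text = "foo##", the call returns 3: the
// trailing "##" could be the start of the stop string, so the caller holds
// those bytes back until more output disambiguates them.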
template <class Iter>
static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
{
    std::string ret;
    for (; begin != end; ++begin)
    {
        ret += llama_token_to_str(ctx, *begin);
    }
    return ret;
}
static void server_log(const char *level, const char *function, int line,
                       const char *message, const nlohmann::ordered_json &extra)
{
    nlohmann::ordered_json log{
        {"timestamp", time(nullptr)},
        {"level", level},
        {"function", function},
        {"line", line},
        {"message", message},
    };

    if (!extra.empty())
    {
        log.merge_patch(extra);
    }

    const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
    fprintf(stdout, "%.*s\n", (int)str.size(), str.data());
    fflush(stdout);
}
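// Each log entry is one JSON object per line on stdout, e.g. (values illustrative):
//   {"timestamp":1690000000,"level":"INFO","function":"main","line":42,"message":"..."}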
// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
{
    std::string out = token == -1 ? "" : llama_token_to_str(ctx, token);
    // if the leading byte has the high bit set, the token is (part of) a
    // multibyte UTF-8 character; print it as a hex byte escape instead
    if (out.size() > 0 && (out[0] & 0x80) == 0x80)
    {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }
    return out;
}
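// e.g. a lone continuation byte 0xa6 is rendered as the string "byte: \xa6"
// rather than being emitted as invalid UTF-8.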
// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> probs)
{
    json out = json::array();
    for (const auto &prob : probs)
    {
        json probs_for_token = json::array();
        for (const auto &p : prob.probs)
        {
            std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json{
                {"tok_str", tok_str},
                {"prob", p.prob},
            });
        }
        std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json{
            {"content", tok_str},
            {"probs", probs_for_token},
        });
    }
    return out;
}
static bool server_verbose = false;

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                            \
    do                                                                   \
    {                                                                    \
        if (server_verbose)                                              \
        {                                                                \
            server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                                \
    } while (0)
#endif

#define LOG_ERROR(MSG, ...) server_log("ERROR", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)
struct llama_server_context
{
    bool stream = false;
    bool has_next_token = false;
    std::string generated_text;
    std::vector<completion_token_output> generated_token_probs;

    size_t num_prompt_tokens = 0;
    size_t num_tokens_predicted = 0;
    size_t n_past = 0;
    size_t n_remain = 0;

    std::vector<llama_token> embd;
    std::vector<llama_token> last_n_tokens;

    llama_model *model = nullptr;
    llama_context *ctx = nullptr;
    gpt_params params;

    llama_grammar *grammar = nullptr;

    bool truncated = false;
    bool stopped_eos = false;
    bool stopped_word = false;
    bool stopped_limit = false;
    std::string stopping_word;
    int32_t multibyte_pending = 0;

    std::mutex mutex;

    std::unique_lock<std::mutex> lock()
    {
        return std::unique_lock<std::mutex>(mutex);
    }

    ~llama_server_context()
    {
        if (ctx)
        {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model)
        {
            llama_free_model(model);
            model = nullptr;
        }
    }

    void rewind()
    {
        params.antiprompt.clear();
        params.grammar.clear();
        num_prompt_tokens = 0;
        num_tokens_predicted = 0;
        generated_text = "";
        generated_text.reserve(params.n_ctx);
        generated_token_probs.clear();
        truncated = false;
        stopped_eos = false;
        stopped_word = false;
        stopped_limit = false;
        stopping_word = "";
        multibyte_pending = 0;
        grammar = nullptr;

        n_remain = 0;
        n_past = 0;
    }
    bool loadModel(const gpt_params &params_)
    {
        params = params_;
        std::tie(model, ctx) = llama_init_from_gpt_params(params);
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params_.model}});
            return false;
        }

        last_n_tokens.resize(params.n_ctx);
        std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
        return true;
    }

    bool loadGrammar()
    {
        if (!params.grammar.empty()) {
            grammar_parser::parse_state parsed_grammar;

            parsed_grammar = grammar_parser::parse(params.grammar.c_str());
            // will be empty (default) if there are parse errors
            if (parsed_grammar.rules.empty()) {
                LOG_ERROR("grammar parse error", {{"grammar", params.grammar}});
                return false;
            }
            grammar_parser::print_grammar(stderr, parsed_grammar);

            {
                auto it = params.logit_bias.find(llama_token_eos());
                if (it != params.logit_bias.end() && it->second == -INFINITY) {
                    LOG_WARNING("EOS token is disabled, which will cause most grammars to fail", {});
                }
            }

            std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
            grammar = llama_grammar_init(
                grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
        }
        return true;
    }
    void loadPrompt()
    {
        params.prompt.insert(0, 1, ' '); // always add a first space
        std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
        num_prompt_tokens = prompt_tokens.size();

        if (params.n_keep < 0)
        {
            params.n_keep = (int)num_prompt_tokens;
        }
        params.n_keep = std::min(params.n_ctx - 4, params.n_keep);

        // if input prompt is too big, truncate like normal
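        // Illustrative example (numbers assumed): with n_ctx = 512, n_keep = 64 and a
        // 700-token prompt, n_left = (512 - 64) / 2 = 224 and
        // erased_blocks = (700 - 64 - 224 - 1) / 224 = 1, so the first 64 tokens are
        // kept, one block of 224 tokens is dropped, and the remainder is appended,
        // leaving a prompt that fits back inside the context window.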
        if (num_prompt_tokens >= (size_t)params.n_ctx)
        {
            const int n_left = (params.n_ctx - params.n_keep) / 2;
            std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
            const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
            new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
            std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), last_n_tokens.begin());

            LOG_VERBOSE("input truncated", {
                                               {"n_ctx", params.n_ctx},
                                               {"n_keep", params.n_keep},
                                               {"n_left", n_left},
                                               {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
                                           });

            truncated = true;
            prompt_tokens = new_tokens;
        }
        else
        {
            const size_t ps = num_prompt_tokens;
            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
        }

        // compare the evaluated prompt with the new prompt
        n_past = common_part(embd, prompt_tokens);
        embd = prompt_tokens;
        if (n_past == num_prompt_tokens)
        {
            // we have to evaluate at least 1 token to generate logits.
            n_past--;
        }

        LOG_VERBOSE("prompt ingested", {
                                           {"n_past", n_past},
                                           {"cached", tokens_to_str(ctx, embd.cbegin(), embd.cbegin() + n_past)},
                                           {"to_eval", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
                                       });

        has_next_token = true;
    }
    void beginCompletion()
    {
        // number of tokens remaining in the sampling budget for this completion
        n_remain = params.n_predict;
        llama_set_rng_seed(ctx, params.seed);
    }
    completion_token_output nextToken()
    {
        completion_token_output result;
        result.tok = -1;

        if (embd.size() >= (size_t)params.n_ctx)
        {
            // Reset context
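            // The context is full: keep the first n_keep tokens, drop the oldest
            // half of the rest, and re-evaluate from n_past = n_keep so generation
            // can continue inside the window.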
            const int n_left = (params.n_ctx - params.n_keep) / 2;

            std::vector<llama_token> new_tokens(embd.begin(), embd.begin() + params.n_keep);
            new_tokens.insert(new_tokens.end(), embd.end() - n_left, embd.end());
            embd = new_tokens;
            n_past = params.n_keep;
            truncated = true;
            LOG_VERBOSE("input truncated", {
                                               {"n_ctx", params.n_ctx},
                                               {"n_keep", params.n_keep},
                                               {"n_left", n_left},
                                               {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
                                           });
        }

        while (n_past < embd.size())
        {
            int n_eval = (int)embd.size() - n_past;
            if (n_eval > params.n_batch)
            {
                n_eval = params.n_batch;
            }
            if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads))
            {
                LOG_ERROR("failed to eval", {
                                                {"n_eval", n_eval},
                                                {"n_past", n_past},
                                                {"n_threads", params.n_threads},
                                                {"embd", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
                                            });
                has_next_token = false;
                return result;
            }
            n_past += n_eval;
        }
        if (params.n_predict == 0)
        {
            has_next_token = false;
            result.tok = llama_token_eos();
            return result;
        }

        // out of user input, sample next token
        const float temp = params.temp;
        const int32_t top_k = params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
        const float top_p = params.top_p;
        const float tfs_z = params.tfs_z;
        const float typical_p = params.typical_p;
        const int32_t repeat_last_n = params.repeat_last_n < 0 ? params.n_ctx : params.repeat_last_n;
        const float repeat_penalty = params.repeat_penalty;
        const float alpha_presence = params.presence_penalty;
        const float alpha_frequency = params.frequency_penalty;
        const int mirostat = params.mirostat;
        const float mirostat_tau = params.mirostat_tau;
        const float mirostat_eta = params.mirostat_eta;
        const bool penalize_nl = params.penalize_nl;
        const int32_t n_probs = params.n_probs;
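        // Sampling pipeline: apply logit biases, then repetition/frequency/presence
        // penalties, then (optionally) grammar constraints, and finally pick a token
        // greedily (temp <= 0), via mirostat v1/v2, or through the
        // top-k -> tail-free -> typical -> top-p -> temperature chain.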
        {
            auto *logits = llama_get_logits(ctx);
            auto n_vocab = llama_n_vocab(ctx);

            // Apply params.logit_bias map
            for (const auto &it : params.logit_bias)
            {
                logits[it.first] += it.second;
            }

            std::vector<llama_token_data> candidates;
            candidates.reserve(n_vocab);
            for (llama_token token_id = 0; token_id < n_vocab; token_id++)
            {
                candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
            }

            llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};

            // Apply penalties
            float nl_logit = logits[llama_token_nl()];
            auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), params.n_ctx);
            llama_sample_repetition_penalty(ctx, &candidates_p,
                                            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                                            last_n_repeat, repeat_penalty);
            llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                                                          last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                                                          last_n_repeat, alpha_frequency, alpha_presence);
            if (!penalize_nl)
            {
                logits[llama_token_nl()] = nl_logit;
            }

            if (grammar != nullptr) {
                llama_sample_grammar(ctx, &candidates_p, grammar);
            }

            if (temp <= 0)
            {
                // Greedy sampling
                result.tok = llama_sample_token_greedy(ctx, &candidates_p);
                if (n_probs > 0)
                {
                    llama_sample_softmax(ctx, &candidates_p);
                }
            }
            else
            {
                if (mirostat == 1)
                {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    const int mirostat_m = 100;
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                }
                else if (mirostat == 2)
                {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                }
                else
                {
                    // Temperature sampling
                    size_t min_keep = std::max(1, n_probs);
                    llama_sample_top_k(ctx, &candidates_p, top_k, min_keep);
                    llama_sample_tail_free(ctx, &candidates_p, tfs_z, min_keep);
                    llama_sample_typical(ctx, &candidates_p, typical_p, min_keep);
                    llama_sample_top_p(ctx, &candidates_p, top_p, min_keep);
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token(ctx, &candidates_p);
                }
            }

            if (grammar != nullptr) {
                llama_grammar_accept_token(ctx, grammar, result.tok);
            }

            for (size_t i = 0; i < std::min(candidates_p.size, (size_t)n_probs); ++i)
            {
                result.probs.push_back({candidates_p.data[i].id, candidates_p.data[i].p});
            }

            last_n_tokens.erase(last_n_tokens.begin());
            last_n_tokens.push_back(result.tok);
            num_tokens_predicted++;
        }
        // add it to the context
        embd.push_back(result.tok);
        // decrement remaining sampling budget
        --n_remain;

        if (!embd.empty() && embd.back() == llama_token_eos())
        {
            // stopping_word = llama_token_to_str(ctx, embd.back());
            has_next_token = false;
            stopped_eos = true;
            LOG_VERBOSE("eos token found", {});
            return result;
        }

        has_next_token = params.n_predict == -1 || n_remain != 0;
        return result;
    }
    size_t findStoppingStrings(const std::string &text, const size_t last_token_size,
                               const stop_type type)
    {
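        // STOP_FULL looks for a complete stop string in the newly generated tail of
        // `text` and, when found, ends generation; STOP_PARTIAL only reports where a
        // stop string *might* have started, so streaming can hold those bytes back.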
        size_t stop_pos = std::string::npos;
        for (const std::string &word : params.antiprompt)
        {
            size_t pos;
            if (type == STOP_FULL)
            {
                const size_t tmp = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
                pos = text.find(word, from_pos);
            }
            else
            {
                pos = find_partial_stop_string(word, text);
            }
            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos))
            {
                if (type == STOP_FULL)
                {
                    stopping_word = word;
                    stopped_word = true;
                    has_next_token = false;
                }
                stop_pos = pos;
            }
        }
        return stop_pos;
    }
    completion_token_output doCompletion()
    {
        const completion_token_output token_with_probs = nextToken();

        const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(ctx, token_with_probs.tok);
        generated_text += token_text;

        if (params.n_probs > 0)
        {
            generated_token_probs.push_back(token_with_probs);
        }
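        // A single token can cover only part of a multibyte UTF-8 character.
        // multibyte_pending counts how many continuation bytes are still owed; while
        // it is non-zero the accumulated text is not yet valid UTF-8 and must not be
        // flushed to the client.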
        if (multibyte_pending > 0)
        {
            multibyte_pending -= token_text.size();
        }
        else if (token_text.size() == 1)
        {
            const char c = token_text[0];
            // 2-byte characters: 110xxxxx 10xxxxxx
            if ((c & 0xE0) == 0xC0)
            {
                multibyte_pending = 1;
            }
            // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
            else if ((c & 0xF0) == 0xE0)
            {
                multibyte_pending = 2;
            }
            // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
            else if ((c & 0xF8) == 0xF0)
            {
                multibyte_pending = 3;
            }
            else
            {
                multibyte_pending = 0;
            }
        }

        if (multibyte_pending > 0 && !has_next_token)
        {
            has_next_token = true;
            n_remain++;
        }

        if (!has_next_token && n_remain == 0)
        {
            stopped_limit = true;
        }

        LOG_VERBOSE("next token", {
                                      {"token", token_with_probs.tok},
                                      {"token_text", tokens_to_output_formatted_string(ctx, token_with_probs.tok)},
                                      {"has_next_token", has_next_token},
                                      {"n_remain", n_remain},
                                      {"num_tokens_predicted", num_tokens_predicted},
                                      {"stopped_eos", stopped_eos},
                                      {"stopped_word", stopped_word},
                                      {"stopped_limit", stopped_limit},
                                      {"stopping_word", stopping_word},
                                  });

        return token_with_probs;
    }
    std::vector<float> getEmbedding()
    {
        static const int n_embd = llama_n_embd(ctx);
        if (!params.embedding)
        {
            LOG_WARNING("embedding disabled", {
                                                  {"params.embedding", params.embedding},
                                              });
            return std::vector<float>(n_embd, 0.0f);
        }
        const float *data = llama_get_embeddings(ctx);
        std::vector<float> embedding(data, data + n_embd);
        return embedding;
    }
};
static void server_print_usage(const char *argv0, const gpt_params &params,
                               const server_params &sparams)
{
    fprintf(stdout, "usage: %s [options]\n", argv0);
    fprintf(stdout, "\n");
    fprintf(stdout, "options:\n");
    fprintf(stdout, "  -h, --help                show this help message and exit\n");
    fprintf(stdout, "  -v, --verbose             verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    fprintf(stdout, "  -t N, --threads N         number of threads to use during computation (default: %d)\n", params.n_threads);
    fprintf(stdout, "  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
    fprintf(stdout, "  -gqa N, --gqa N           grouped-query attention factor (TEMP!!! use 8 for LLaMAv2 70B) (default: %d)\n", params.n_gqa);
    fprintf(stdout, "  -eps N, --rms-norm-eps N  rms norm eps (TEMP!!! use 1e-5 for LLaMAv2) (default: %.1e)\n", params.rms_norm_eps);
    fprintf(stdout, "  --rope-freq-base N        RoPE base frequency (default: %.1f)\n", params.rope_freq_base);
    fprintf(stdout, "  --rope-freq-scale N       RoPE frequency scaling factor (default: %g)\n", params.rope_freq_scale);
    fprintf(stdout, "  -b N, --batch-size N      batch size for prompt processing (default: %d)\n", params.n_batch);
    fprintf(stdout, "  --memory-f32              use f32 instead of f16 for memory key+value (default: disabled)\n");
    fprintf(stdout, "                            not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_mlock_supported())
    {
        fprintf(stdout, "  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_mmap_supported())
    {
        fprintf(stdout, "  --no-mmap                 do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
    fprintf(stdout, "  -ngl N, --n-gpu-layers N\n");
    fprintf(stdout, "                            number of layers to store in VRAM\n");
    fprintf(stdout, "  -ts SPLIT --tensor-split SPLIT\n");
    fprintf(stdout, "                            how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    fprintf(stdout, "  -mg i, --main-gpu i       the GPU to use for scratch and small tensors\n");
    fprintf(stdout, "  -lv, --low-vram           don't allocate VRAM scratch buffer\n");
    fprintf(stdout, "  -mmq, --mul-mat-q         use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n");
    fprintf(stdout, "                            Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n");
    fprintf(stdout, "                            is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n");
#endif
    fprintf(stdout, "  -m FNAME, --model FNAME\n");
    fprintf(stdout, "                            model path (default: %s)\n", params.model.c_str());
    fprintf(stdout, "  -a ALIAS, --alias ALIAS\n");
    fprintf(stdout, "                            set an alias for the model, will be added as `model` field in completion response\n");
    fprintf(stdout, "  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
    fprintf(stdout, "  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
    fprintf(stdout, "  --host IP                 ip address to listen on (default: %s)\n", sparams.hostname.c_str());
    fprintf(stdout, "  --port PORT               port to listen on (default: %d)\n", sparams.port);
    fprintf(stdout, "  --path PUBLIC_PATH        path from which to serve static files (default: %s)\n", sparams.public_path.c_str());
    fprintf(stdout, "  -to N, --timeout N        server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    fprintf(stdout, "  --embedding               enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    fprintf(stdout, "\n");
}
static void server_params_parse(int argc, char **argv, server_params &sparams,
                                gpt_params &params)
{
    gpt_params default_params;
    server_params default_sparams;
    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++)
    {
        arg = argv[i];
        if (arg == "--port")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        }
        else if (arg == "--host")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        }
        else if (arg == "--path")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.public_path = argv[i];
        }
        else if (arg == "--timeout" || arg == "-to")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.read_timeout = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        }
        else if (arg == "-m" || arg == "--model")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        }
        else if (arg == "-a" || arg == "--alias")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        }
        else if (arg == "-h" || arg == "--help")
        {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        }
        else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        }
        else if (arg == "-gqa" || arg == "--gqa")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_gqa = std::stoi(argv[i]);
        }
        else if (arg == "-eps" || arg == "--rms-norm-eps")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rms_norm_eps = std::stof(argv[i]);
        }
        else if (arg == "--rope-freq-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_base = std::stof(argv[i]);
        }
        else if (arg == "--rope-freq-scale")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = std::stof(argv[i]);
        }
        else if (arg == "--memory-f32" || arg == "--memory_f32")
        {
            params.memory_f16 = false;
        }
        else if (arg == "--threads" || arg == "-t")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        }
        else if (arg == "-b" || arg == "--batch-size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        }
        else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
            params.n_gpu_layers = std::stoi(argv[i]);
#else
            LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                        "See main README.md for information on enabling GPU BLAS support",
                        {{"n_gpu_layers", params.n_gpu_layers}});
#endif
        }
        else if (arg == "--tensor-split" || arg == "-ts")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

            for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device)
            {
                if (i_device < split_arg.size())
                {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else
                {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--low-vram" || arg == "-lv")
        {
#ifdef GGML_USE_CUBLAS
            params.low_vram = true;
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--mul-mat-q" || arg == "-mmq")
        {
#ifdef GGML_USE_CUBLAS
            params.mul_mat_q = true;
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--main-gpu" || arg == "-mg")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
#endif
        }
        else if (arg == "--lora")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter = argv[i];
            params.use_mmap = false;
        }
        else if (arg == "--lora-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        }
        else if (arg == "-v" || arg == "--verbose")
        {
#if SERVER_VERBOSE != 1
            LOG_WARNING("server.cpp is not built with verbose logging.", {});
#else
            server_verbose = true;
#endif
        }
        else if (arg == "--mlock")
        {
            params.use_mlock = true;
        }
        else if (arg == "--no-mmap")
        {
            params.use_mmap = false;
        }
        else if (arg == "--embedding")
        {
            params.embedding = true;
        }
        else
        {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (invalid_param)
    {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}
static json format_generation_settings(llama_server_context &llama)
{
    const auto eos_bias = llama.params.logit_bias.find(llama_token_eos());
    const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
                            eos_bias->second < 0.0f && std::isinf(eos_bias->second);
    return json{
        {"n_ctx", llama.params.n_ctx},
        {"model", llama.params.model_alias},
        {"seed", llama.params.seed},
        {"temp", llama.params.temp},
        {"top_k", llama.params.top_k},
        {"top_p", llama.params.top_p},
        {"tfs_z", llama.params.tfs_z},
        {"typical_p", llama.params.typical_p},
        {"repeat_last_n", llama.params.repeat_last_n},
        {"repeat_penalty", llama.params.repeat_penalty},
        {"presence_penalty", llama.params.presence_penalty},
        {"frequency_penalty", llama.params.frequency_penalty},
        {"mirostat", llama.params.mirostat},
        {"mirostat_tau", llama.params.mirostat_tau},
        {"mirostat_eta", llama.params.mirostat_eta},
        {"penalize_nl", llama.params.penalize_nl},
        {"stop", llama.params.antiprompt},
        {"n_predict", llama.params.n_predict},
        {"n_keep", llama.params.n_keep},
        {"ignore_eos", ignore_eos},
        {"stream", llama.stream},
        {"logit_bias", llama.params.logit_bias},
        {"n_probs", llama.params.n_probs},
        {"grammar", llama.params.grammar},
    };
}
static json format_embedding_response(llama_server_context &llama)
{
    return json{
        {"embedding", llama.getEmbedding()},
    };
}

static json format_timings(llama_server_context &llama)
{
    const auto timings = llama_get_timings(llama.ctx);

    assert(timings.n_eval == llama.num_tokens_predicted);

    return json{
        {"prompt_n", timings.n_p_eval}, // was timings.n_eval: prompt stats should use the prompt-eval counter
        {"prompt_ms", timings.t_p_eval_ms},
        {"prompt_per_token_ms", timings.t_p_eval_ms / timings.n_p_eval},
        {"prompt_per_second", 1e3 / timings.t_p_eval_ms * timings.n_p_eval},

        {"predicted_n", timings.n_eval},
        {"predicted_ms", timings.t_eval_ms},
        {"predicted_per_token_ms", timings.t_eval_ms / timings.n_eval},
        {"predicted_per_second", 1e3 / timings.t_eval_ms * timings.n_eval},
    };
}
static json format_final_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", true},
        {"model", llama.params.model_alias},
        {"tokens_predicted", llama.num_tokens_predicted},
        {"tokens_evaluated", llama.num_prompt_tokens},
        {"generation_settings", format_generation_settings(llama)},
        {"prompt", llama.params.prompt},
        {"truncated", llama.truncated},
        {"stopped_eos", llama.stopped_eos},
        {"stopped_word", llama.stopped_word},
        {"stopped_limit", llama.stopped_limit},
        {"stopping_word", llama.stopping_word},
        {"tokens_cached", llama.n_past},
        {"timings", format_timings(llama)},
    };

    if (llama.params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}
static json format_partial_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", false},
    };

    if (llama.params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json{
        {"tokens", tokens}};
}
static void parse_options_completion(const json &body, llama_server_context &llama)
{
    gpt_params default_params;

    llama.stream = body.value("stream", false);
    llama.params.n_predict = body.value("n_predict", default_params.n_predict);
    llama.params.top_k = body.value("top_k", default_params.top_k);
    llama.params.top_p = body.value("top_p", default_params.top_p);
    llama.params.tfs_z = body.value("tfs_z", default_params.tfs_z);
    llama.params.typical_p = body.value("typical_p", default_params.typical_p);
    llama.params.repeat_last_n = body.value("repeat_last_n", default_params.repeat_last_n);
    llama.params.temp = body.value("temperature", default_params.temp);
    llama.params.repeat_penalty = body.value("repeat_penalty", default_params.repeat_penalty);
    llama.params.presence_penalty = body.value("presence_penalty", default_params.presence_penalty);
    llama.params.frequency_penalty = body.value("frequency_penalty", default_params.frequency_penalty);
    llama.params.mirostat = body.value("mirostat", default_params.mirostat);
    llama.params.mirostat_tau = body.value("mirostat_tau", default_params.mirostat_tau);
    llama.params.mirostat_eta = body.value("mirostat_eta", default_params.mirostat_eta);
    llama.params.penalize_nl = body.value("penalize_nl", default_params.penalize_nl);
    llama.params.n_keep = body.value("n_keep", default_params.n_keep);
    llama.params.seed = body.value("seed", default_params.seed);
    llama.params.prompt = body.value("prompt", default_params.prompt);
    llama.params.grammar = body.value("grammar", default_params.grammar);
    llama.params.n_probs = body.value("n_probs", default_params.n_probs);

    llama.params.logit_bias.clear();
    if (body.value("ignore_eos", false))
    {
        llama.params.logit_bias[llama_token_eos()] = -INFINITY;
    }
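    // "logit_bias" is expected as an array of [token_id, bias] pairs, e.g.
    // [[123, 1.5], [456, false]] (token ids illustrative) -- a numeric bias is
    // added to that token's logit, and `false` bans the token outright
    // (bias of -infinity).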
    const auto &logit_bias = body.find("logit_bias");
    if (logit_bias != body.end() && logit_bias->is_array())
    {
        const int n_vocab = llama_n_vocab(llama.ctx);
        for (const auto &el : *logit_bias)
        {
            if (el.is_array() && el.size() == 2 && el[0].is_number_integer())
            {
                llama_token tok = el[0].get<llama_token>();
                if (tok >= 0 && tok < n_vocab)
                {
                    if (el[1].is_number())
                    {
                        llama.params.logit_bias[tok] = el[1].get<float>();
                    }
                    else if (el[1].is_boolean() && !el[1].get<bool>())
                    {
                        llama.params.logit_bias[tok] = -INFINITY;
                    }
                }
            }
        }
    }

    llama.params.antiprompt.clear();
    const auto &stop = body.find("stop");
    if (stop != body.end() && stop->is_array())
    {
        for (const auto &word : *stop)
        {
            if (!word.empty())
            {
                llama.params.antiprompt.push_back(word);
            }
        }
    }

    LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
}
static void log_server_request(const Request &req, const Response &res)
{
    LOG_INFO("request", {
                            {"remote_addr", req.remote_addr},
                            {"remote_port", req.remote_port},
                            {"status", res.status},
                            {"method", req.method},
                            {"path", req.path},
                            {"params", req.params},
                        });

    LOG_VERBOSE("request", {
                               {"request", req.body},
                               {"response", res.body},
                           });
}
int main(int argc, char **argv)
{
    // own arguments required by this example
    gpt_params params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown")
    {
        params.model_alias = params.model;
    }

    llama_backend_init(params.numa);

    LOG_INFO("build info", {{"build", BUILD_NUMBER},
                            {"commit", BUILD_COMMIT}});
    LOG_INFO("system info", {
                                {"n_threads", params.n_threads},
                                {"total_threads", std::thread::hardware_concurrency()},
                                {"system_info", llama_print_system_info()},
                            });

    // load the model
    if (!llama.loadModel(params))
    {
        return 1;
    }

    Server svr;

    svr.set_default_headers({{"Server", "llama.cpp"},
                             {"Access-Control-Allow-Origin", "*"},
                             {"Access-Control-Allow-Headers", "content-type"}});

    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const Request &, Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&index_html), index_html_len, "text/html");
                return false;
            });

    // this is only called if no index.js is found in the public --path
    svr.Get("/index.js", [](const Request &, Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&index_js), index_js_len, "text/javascript");
                return false;
            });

    // this is only called if no completion.js is found in the public --path
    svr.Get("/completion.js", [](const Request &, Response &res)
            {
                res.set_content(reinterpret_cast<const char *>(&completion_js), completion_js_len, "application/javascript");
                return false;
            });
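    // Example request (illustrative; the field names are exactly those read by
    // parse_options_completion() above):
    //   curl http://127.0.0.1:8080/completion \
    //        -d '{"prompt": "Building a website can be done in 10 steps:", "n_predict": 128, "stream": false}'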
    svr.Post("/completion", [&llama](const Request &req, Response &res)
             {
                 auto lock = llama.lock();

                 llama.rewind();
                 llama_reset_timings(llama.ctx);

                 parse_options_completion(json::parse(req.body), llama);

                 if (!llama.loadGrammar())
                 {
                     res.status = 400;
                     return;
                 }

                 llama.loadPrompt();
                 llama.beginCompletion();

                 if (!llama.stream) {
                     size_t stop_pos = std::string::npos;

                     while (llama.has_next_token) {
                         const completion_token_output token_with_probs = llama.doCompletion();
                         const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);

                         stop_pos = llama.findStoppingStrings(llama.generated_text,
                                                              token_text.size(), STOP_FULL);
                     }

                     if (stop_pos == std::string::npos) {
                         stop_pos = llama.findStoppingStrings(llama.generated_text, 0, STOP_PARTIAL);
                     }
                     if (stop_pos != std::string::npos) {
                         llama.generated_text.erase(llama.generated_text.begin() + stop_pos,
                                                    llama.generated_text.end());
                     }

                     const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);

                     llama_print_timings(llama.ctx);

                     res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace),
                                     "application/json");
                 } else {
                     const auto chunked_content_provider = [&](size_t, DataSink & sink) {
                         size_t sent_count = 0;
                         size_t sent_token_probs_index = 0;

                         while (llama.has_next_token) {
                             const completion_token_output token_with_probs = llama.doCompletion();
                             const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);
                             if (llama.multibyte_pending > 0) {
                                 continue;
                             }

                             size_t pos = std::min(sent_count, llama.generated_text.size());

                             const std::string str_test = llama.generated_text.substr(pos);
                             size_t stop_pos =
                                 llama.findStoppingStrings(str_test, token_text.size(), STOP_FULL);
                             if (stop_pos != std::string::npos) {
                                 llama.generated_text.erase(
                                     llama.generated_text.begin() + pos + stop_pos,
                                     llama.generated_text.end());
                                 pos = std::min(sent_count, llama.generated_text.size());
                             } else {
                                 stop_pos = llama.findStoppingStrings(str_test, token_text.size(),
                                                                      STOP_PARTIAL);
                             }

                             const std::string to_send = llama.generated_text.substr(pos, stop_pos);
                             sent_count += to_send.size();

                             std::vector<completion_token_output> probs_output = {};

                             if (llama.params.n_probs > 0) {
                                 const std::vector<llama_token> to_send_toks = llama_tokenize(llama.ctx, to_send, false);
                                 size_t probs_pos = std::min(sent_token_probs_index, llama.generated_token_probs.size());
                                 size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama.generated_token_probs.size());
                                 if (probs_pos < probs_stop_pos) {
                                     probs_output = std::vector<completion_token_output>(llama.generated_token_probs.begin() + probs_pos, llama.generated_token_probs.begin() + probs_stop_pos);
                                 }
                                 sent_token_probs_index = probs_stop_pos;
                             }

                             const json data = llama.has_next_token
                                                   ? format_partial_response(llama, to_send, probs_output)
                                                   // Generation is done, send extra information.
                                                   : format_final_response(llama, to_send, llama.generated_token_probs);

                             const std::string str =
                                 "data: " +
                                 data.dump(-1, ' ', false, json::error_handler_t::replace) +
                                 "\n\n";

                             LOG_VERBOSE("data stream", {
                                 { "to_send", str }
                             });

                             if (!sink.write(str.data(), str.size())) {
                                 LOG_VERBOSE("stream closed", {});
                                 llama_print_timings(llama.ctx);
                                 return false;
                             }
                         }

                         llama_print_timings(llama.ctx);
                         sink.done();
                         return true;
                     };

                     const auto on_complete = [&](bool) {
                         llama.mutex.unlock();
                     };

                     lock.release();
                     res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
                 }
             });
    svr.Get("/model.json", [&llama](const Request &, Response &res)
            {
                const json data = format_generation_settings(llama);
                return res.set_content(data.dump(), "application/json");
            });

    svr.Options(R"(/.*)", [](const Request &, Response &res)
                { return res.set_content("", "application/json"); });
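    // Example request (illustrative):
    //   curl http://127.0.0.1:8080/tokenize -d '{"content": "Hello world"}'
    // responds with {"tokens": [...]} as built by format_tokenizer_response().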
    svr.Post("/tokenize", [&llama](const Request &req, Response &res)
             {
                 auto lock = llama.lock();

                 const json body = json::parse(req.body);
                 const std::string content = body.value("content", "");
                 const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
                 const json data = format_tokenizer_response(tokens);
                 return res.set_content(data.dump(), "application/json");
             });
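    // Example request (illustrative; the server must be started with --embedding,
    // otherwise getEmbedding() returns a zero vector):
    //   curl http://127.0.0.1:8080/embedding -d '{"content": "Hello world"}'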
    svr.Post("/embedding", [&llama](const Request &req, Response &res)
             {
                 auto lock = llama.lock();

                 const json body = json::parse(req.body);

                 llama.rewind();
                 llama_reset_timings(llama.ctx);
                 llama.params.prompt = body.value("content", "");
                 llama.params.n_predict = 0;
                 llama.loadPrompt();
                 llama.beginCompletion();
                 llama.doCompletion();

                 const json data = format_embedding_response(llama);
                 return res.set_content(data.dump(), "application/json");
             });

    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const Request &, Response &res, std::exception_ptr ep)
                              {
                                  const auto *fmt = "500 Internal Server Error\n%s";
                                  char buf[BUFSIZ];
                                  try {
                                      std::rethrow_exception(std::move(ep));
                                  } catch (std::exception &e) {
                                      snprintf(buf, sizeof(buf), fmt, e.what());
                                  } catch (...) {
                                      snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
                                  }
                                  res.set_content(buf, "text/plain");
                                  res.status = 500;
                              });

    svr.set_error_handler([](const Request &, Response &res)
                          {
                              if (res.status == 400) {
                                  res.set_content("Invalid request", "text/plain");
                              } else {
                                  res.set_content("File Not Found", "text/plain");
                                  res.status = 404;
                              }
                          });
    // set timeouts and change hostname and port
    svr.set_read_timeout(sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port))
    {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    // to make it ctrl+clickable:
    fprintf(stdout, "\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);

    LOG_INFO("HTTP server listening", {
                                          {"hostname", sparams.hostname},
                                          {"port", sparams.port},
                                      });

    if (!svr.listen_after_bind())
    {
        return 1;
    }

    if (llama.grammar != nullptr) {
        llama_grammar_free(llama.grammar);
    }
    llama_backend_free();

    return 0;
}