#include "common.h"
#include "llama.h"
#include "build-info.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif

#include "httplib.h"
#include "json.hpp"

// auto generated files (update with ./deps.sh)
#include "index.html.hpp"
#include "index.js.hpp"
#include "completion.js.hpp"

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

using namespace httplib;
using json = nlohmann::json;

struct server_params
{
    std::string hostname = "127.0.0.1";
    std::string public_path = "examples/server/public";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
};

// completion token output with probabilities
struct completion_token_output
{
    struct token_prob
    {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
    llama_token tok;
};

static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
{
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
    {
    }
    return i;
}
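
// Illustrative example (not part of the server logic): common_part returns the
// length of the longest shared prefix, e.g. {1, 2, 3} vs. {1, 2, 9} -> 2. This is
// what lets a re-sent prompt reuse the already-evaluated part of the context.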

enum stop_type
{
    STOP_FULL,
    STOP_PARTIAL,
};

static bool ends_with(const std::string &str, const std::string &suffix)
{
    return str.size() >= suffix.size() &&
           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string &stop,
                                       const std::string &text)
{
    if (!text.empty() && !stop.empty())
    {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
        {
            if (stop[char_index] == text_last_char)
            {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial))
                {
                    return text.size() - char_index - 1;
                }
            }
        }
    }
    return std::string::npos;
}
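
// Illustrative example (assumed values): with stop = "###" and text = "Hello ##",
// the trailing "##" matches a prefix of the stop string, so the function returns 6,
// the index in the text where the possible stop word begins.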

template <class Iter>
static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
{
    std::string ret;
    for (; begin != end; ++begin)
    {
        ret += llama_token_to_str(ctx, *begin);
    }
    return ret;
}

static void server_log(const char *level, const char *function, int line,
                       const char *message, const nlohmann::ordered_json &extra)
{
    nlohmann::ordered_json log{
        {"timestamp", time(nullptr)},
        {"level", level},
        {"function", function},
        {"line", line},
        {"message", message},
    };

    if (!extra.empty())
    {
        log.merge_patch(extra);
    }

    const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
    fprintf(stdout, "%.*s\n", (int)str.size(), str.data());
    fflush(stdout);
}
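
// Example of the single-line JSON this emits (illustrative values, shape per the
// fields above):
// {"timestamp":1688000000,"level":"INFO","function":"main","line":42,"message":"..."}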

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
{
    std::string out = token == -1 ? "" : llama_token_to_str(ctx, token);
    // if the high bit is set, the string is (part of) a multi-byte, partial utf-8 character
    if (out.size() > 0 && (out[0] & 0x80) == 0x80)
    {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }
    return out;
}
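
// Illustrative example: a token that decodes to the lone continuation byte 0xA9
// (e.g. the tail of a split multi-byte character) is rendered as "byte: \xa9".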

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
{
    json out = json::array();
    for (const auto &prob : probs)
    {
        json probs_for_token = json::array();
        for (const auto &p : prob.probs)
        {
            std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json{
                {"tok_str", tok_str},
                {"prob", p.prob},
            });
        }
        std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json{
            {"content", tok_str},
            {"probs", probs_for_token},
        });
    }
    return out;
}
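
// Resulting shape (illustrative values):
// [{"content": "Hi", "probs": [{"tok_str": "Hi", "prob": 0.9}, ...]}, ...]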

static bool server_verbose = false;

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                            \
    do                                                                   \
    {                                                                    \
        if (server_verbose)                                              \
        {                                                                \
            server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                                \
    } while (0)
#endif

#define LOG_ERROR(MSG, ...) server_log("ERROR", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)

struct llama_server_context
{
    bool stream = false;
    bool has_next_token = false;
    std::string generated_text;
    std::vector<completion_token_output> generated_token_probs;

    size_t num_prompt_tokens = 0;
    size_t num_tokens_predicted = 0;
    size_t n_past = 0;
    size_t n_remain = 0;

    std::vector<llama_token> embd;
    std::vector<llama_token> last_n_tokens;

    llama_model *model = nullptr;
    llama_context *ctx = nullptr;
    gpt_params params;

    bool truncated = false;
    bool stopped_eos = false;
    bool stopped_word = false;
    bool stopped_limit = false;
    std::string stopping_word;
    int32_t multibyte_pending = 0;

    std::mutex mutex;

    std::unique_lock<std::mutex> lock()
    {
        return std::unique_lock<std::mutex>(mutex);
    }

    ~llama_server_context()
    {
        if (ctx)
        {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model)
        {
            llama_free_model(model);
            model = nullptr;
        }
    }

    void rewind()
    {
        params.antiprompt.clear();
        num_prompt_tokens = 0;
        num_tokens_predicted = 0;
        generated_text = "";
        generated_text.reserve(params.n_ctx);
        generated_token_probs.clear();
        truncated = false;
        stopped_eos = false;
        stopped_word = false;
        stopped_limit = false;
        stopping_word = "";
        multibyte_pending = 0;
        n_remain = 0;
        n_past = 0;
    }

    bool loadModel(const gpt_params &params_)
    {
        params = params_;
        std::tie(model, ctx) = llama_init_from_gpt_params(params);
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params_.model}});
            return false;
        }

        last_n_tokens.resize(params.n_ctx);
        std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
        return true;
    }

    void loadPrompt()
    {
        params.prompt.insert(0, 1, ' '); // always add a first space
        std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
        num_prompt_tokens = prompt_tokens.size();

        if (params.n_keep < 0)
        {
            params.n_keep = (int)num_prompt_tokens;
        }
        params.n_keep = std::min(params.n_ctx - 4, params.n_keep);

        // if input prompt is too big, truncate like normal
        if (num_prompt_tokens >= (size_t)params.n_ctx)
        {
            const int n_left = (params.n_ctx - params.n_keep) / 2;
            std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
            const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
            new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
            std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), last_n_tokens.begin());

            LOG_VERBOSE("input truncated", {
                                               {"n_ctx", params.n_ctx},
                                               {"n_keep", params.n_keep},
                                               {"n_left", n_left},
                                               {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
                                           });

            truncated = true;
            prompt_tokens = new_tokens;
        }
        else
        {
            const size_t ps = num_prompt_tokens;
            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
        }

        // compare the evaluated prompt with the new prompt
        n_past = common_part(embd, prompt_tokens);
        embd = prompt_tokens;
        if (n_past == num_prompt_tokens)
        {
            // we have to evaluate at least 1 token to generate logits.
            n_past--;
        }

        LOG_VERBOSE("prompt ingested", {
                                           {"n_past", n_past},
                                           {"cached", tokens_to_str(ctx, embd.cbegin(), embd.cbegin() + n_past)},
                                           {"to_eval", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
                                       });

        has_next_token = true;
    }
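
    // Worked example of the truncation above (assumed numbers): with n_ctx = 2048,
    // n_keep = 128 and a 4000-token prompt, n_left = (2048 - 128) / 2 = 960 and
    // erased_blocks = (4000 - 128 - 960 - 1) / 960 = 3, so tokens [128, 3008) are
    // dropped and the kept prefix is stitched to the remaining 992-token suffix.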

    void beginCompletion()
    {
        // number of tokens still left to predict in this completion
        n_remain = params.n_predict;
        llama_set_rng_seed(ctx, params.seed);
    }

    completion_token_output nextToken()
    {
        completion_token_output result;
        result.tok = -1;

        if (embd.size() >= (size_t)params.n_ctx)
        {
            // Reset context
            const int n_left = (params.n_ctx - params.n_keep) / 2;

            std::vector<llama_token> new_tokens(embd.begin(), embd.begin() + params.n_keep);
            new_tokens.insert(new_tokens.end(), embd.end() - n_left, embd.end());
            embd = new_tokens;
            n_past = params.n_keep;
            truncated = true;
            LOG_VERBOSE("input truncated", {
                                               {"n_ctx", params.n_ctx},
                                               {"n_keep", params.n_keep},
                                               {"n_left", n_left},
                                               {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
                                           });
        }

        while (n_past < embd.size())
        {
            int n_eval = (int)embd.size() - n_past;
            if (n_eval > params.n_batch)
            {
                n_eval = params.n_batch;
            }
            if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads))
            {
                LOG_ERROR("failed to eval", {
                                                {"n_eval", n_eval},
                                                {"n_past", n_past},
                                                {"n_threads", params.n_threads},
                                                {"embd", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
                                            });
                has_next_token = false;
                return result;
            }
            n_past += n_eval;
        }

        if (params.n_predict == 0)
        {
            has_next_token = false;
            result.tok = llama_token_eos();
            return result;
        }

        // out of user input, sample next token
        const float temp = params.temp;
        const int32_t top_k = params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
        const float top_p = params.top_p;
        const float tfs_z = params.tfs_z;
        const float typical_p = params.typical_p;
        const int32_t repeat_last_n = params.repeat_last_n < 0 ? params.n_ctx : params.repeat_last_n;
        const float repeat_penalty = params.repeat_penalty;
        const float alpha_presence = params.presence_penalty;
        const float alpha_frequency = params.frequency_penalty;
        const int mirostat = params.mirostat;
        const float mirostat_tau = params.mirostat_tau;
        const float mirostat_eta = params.mirostat_eta;
        const bool penalize_nl = params.penalize_nl;
        const int32_t n_probs = params.n_probs;

        {
            auto *logits = llama_get_logits(ctx);
            auto n_vocab = llama_n_vocab(ctx);

            // Apply params.logit_bias map
            for (const auto &it : params.logit_bias)
            {
                logits[it.first] += it.second;
            }

            std::vector<llama_token_data> candidates;
            candidates.reserve(n_vocab);
            for (llama_token token_id = 0; token_id < n_vocab; token_id++)
            {
                candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
            }

            llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};

            // Apply penalties
            float nl_logit = logits[llama_token_nl()];
            auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), params.n_ctx);
            llama_sample_repetition_penalty(ctx, &candidates_p,
                                            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                                            last_n_repeat, repeat_penalty);
            llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                                                          last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                                                          last_n_repeat, alpha_frequency, alpha_presence);
            if (!penalize_nl)
            {
                logits[llama_token_nl()] = nl_logit;
            }

            if (temp <= 0)
            {
                // Greedy sampling
                result.tok = llama_sample_token_greedy(ctx, &candidates_p);
                if (n_probs > 0)
                {
                    llama_sample_softmax(ctx, &candidates_p);
                }
            }
            else
            {
                if (mirostat == 1)
                {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    const int mirostat_m = 100;
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                }
                else if (mirostat == 2)
                {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                }
                else
                {
                    // Temperature sampling
                    size_t min_keep = std::max(1, n_probs);
                    llama_sample_top_k(ctx, &candidates_p, top_k, min_keep);
                    llama_sample_tail_free(ctx, &candidates_p, tfs_z, min_keep);
                    llama_sample_typical(ctx, &candidates_p, typical_p, min_keep);
                    llama_sample_top_p(ctx, &candidates_p, top_p, min_keep);
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    result.tok = llama_sample_token(ctx, &candidates_p);
                }
            }

            for (size_t i = 0; i < std::min(candidates_p.size, (size_t)n_probs); ++i)
            {
                result.probs.push_back({candidates_p.data[i].id, candidates_p.data[i].p});
            }

            last_n_tokens.erase(last_n_tokens.begin());
            last_n_tokens.push_back(result.tok);
            num_tokens_predicted++;
        }

        // add it to the context
        embd.push_back(result.tok);
        // decrement remaining sampling budget
        --n_remain;

        if (!embd.empty() && embd.back() == llama_token_eos())
        {
            // stopping_word = llama_token_to_str(ctx, embd.back());
            has_next_token = false;
            stopped_eos = true;
            LOG_VERBOSE("eos token found", {});
            return result;
        }

        has_next_token = params.n_predict == -1 || n_remain != 0;
        return result;
    }

    size_t findStoppingStrings(const std::string &text, const size_t last_token_size,
                               const stop_type type)
    {
        size_t stop_pos = std::string::npos;
        for (const std::string &word : params.antiprompt)
        {
            size_t pos;
            if (type == STOP_FULL)
            {
                const size_t tmp = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
                pos = text.find(word, from_pos);
            }
            else
            {
                pos = find_partial_stop_string(word, text);
            }
            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos))
            {
                if (type == STOP_FULL)
                {
                    stopping_word = word;
                    stopped_word = true;
                    has_next_token = false;
                }
                stop_pos = pos;
            }
        }
        return stop_pos;
    }
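
    // Illustrative example (assumed values): with antiprompt = {"User:"} and text
    // ending in "...\nUser:", STOP_FULL finds the stop word and halts generation;
    // if the text instead ends in "...\nUse", STOP_PARTIAL reports where the
    // possible stop word starts so that fragment can be held back from streaming.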

    completion_token_output doCompletion()
    {
        const completion_token_output token_with_probs = nextToken();

        const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(ctx, token_with_probs.tok);
        generated_text += token_text;

        if (params.n_probs > 0)
        {
            generated_token_probs.push_back(token_with_probs);
        }

        if (multibyte_pending > 0)
        {
            multibyte_pending -= token_text.size();
        }
        else if (token_text.size() == 1)
        {
            const char c = token_text[0];
            // 2-byte characters: 110xxxxx 10xxxxxx
            if ((c & 0xE0) == 0xC0)
            {
                multibyte_pending = 1;
            }
            // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
            else if ((c & 0xF0) == 0xE0)
            {
                multibyte_pending = 2;
            }
            // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
            else if ((c & 0xF8) == 0xF0)
            {
                multibyte_pending = 3;
            }
            else
            {
                multibyte_pending = 0;
            }
        }

        if (multibyte_pending > 0 && !has_next_token)
        {
            has_next_token = true;
            n_remain++;
        }

        if (!has_next_token && n_remain == 0)
        {
            stopped_limit = true;
        }

        LOG_VERBOSE("next token", {
                                      {"token", token_with_probs.tok},
                                      {"token_text", tokens_to_output_formatted_string(ctx, token_with_probs.tok)},
                                      {"has_next_token", has_next_token},
                                      {"n_remain", n_remain},
                                      {"num_tokens_predicted", num_tokens_predicted},
                                      {"stopped_eos", stopped_eos},
                                      {"stopped_word", stopped_word},
                                      {"stopped_limit", stopped_limit},
                                      {"stopping_word", stopping_word},
                                  });

        return token_with_probs;
    }
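
    // UTF-8 bookkeeping example (illustrative): "é" is encoded as 0xC3 0xA9. If a
    // token yields only the lead byte 0xC3 (110xxxxx), multibyte_pending becomes 1
    // and generation is forced to continue until the continuation byte arrives,
    // so only complete characters are ever emitted.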

    std::vector<float> getEmbedding()
    {
        static const int n_embd = llama_n_embd(ctx);
        if (!params.embedding)
        {
            LOG_WARNING("embedding disabled", {
                                                  {"params.embedding", params.embedding},
                                              });
            return std::vector<float>(n_embd, 0.0f);
        }
        const float *data = llama_get_embeddings(ctx);
        std::vector<float> embedding(data, data + n_embd);
        return embedding;
    }
};

static void server_print_usage(const char *argv0, const gpt_params &params,
                               const server_params &sparams)
{
    fprintf(stderr, "usage: %s [options]\n", argv0);
    fprintf(stderr, "\n");
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help            show this help message and exit\n");
    fprintf(stderr, "  -v, --verbose         verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
    fprintf(stderr, "  -c N, --ctx-size N    size of the prompt context (default: %d)\n", params.n_ctx);
    fprintf(stderr, "  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch);
    fprintf(stderr, "  --memory-f32          use f32 instead of f16 for memory key+value (default: disabled)\n");
    fprintf(stderr, "                        not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_mlock_supported())
    {
        fprintf(stderr, "  --mlock               force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_mmap_supported())
    {
        fprintf(stderr, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
    fprintf(stderr, "  -ngl N, --n-gpu-layers N\n");
    fprintf(stderr, "                        number of layers to store in VRAM\n");
    fprintf(stderr, "  -ts SPLIT, --tensor-split SPLIT\n");
    fprintf(stderr, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    fprintf(stderr, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n");
    fprintf(stderr, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n");
#endif
    fprintf(stderr, "  -m FNAME, --model FNAME\n");
    fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
    fprintf(stderr, "  -a ALIAS, --alias ALIAS\n");
    fprintf(stderr, "                        set an alias for the model, will be added as `model` field in completion response\n");
    fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
    fprintf(stderr, "  --lora-base FNAME     optional model to use as a base for the layers modified by the LoRA adapter\n");
    fprintf(stderr, "  --host                ip address to listen (default: %s)\n", sparams.hostname.c_str());
    fprintf(stderr, "  --port PORT           port to listen (default: %d)\n", sparams.port);
    fprintf(stderr, "  --path PUBLIC_PATH    path from which to serve static files (default: %s)\n", sparams.public_path.c_str());
    fprintf(stderr, "  -to N, --timeout N    server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    fprintf(stderr, "  --embedding           enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    fprintf(stderr, "\n");
}

static void server_params_parse(int argc, char **argv, server_params &sparams,
                                gpt_params &params)
{
    gpt_params default_params;
    server_params default_sparams;
    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++)
    {
        arg = argv[i];
        if (arg == "--port")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        }
        else if (arg == "--host")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        }
        else if (arg == "--path")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.public_path = argv[i];
        }
        else if (arg == "--timeout" || arg == "-to")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.read_timeout = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        }
        else if (arg == "-m" || arg == "--model")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        }
        else if (arg == "-a" || arg == "--alias")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        }
        else if (arg == "-h" || arg == "--help")
        {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        }
        else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        }
        else if (arg == "--memory-f32" || arg == "--memory_f32")
        {
            params.memory_f16 = false;
        }
        else if (arg == "--threads" || arg == "-t")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        }
        else if (arg == "-b" || arg == "--batch-size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        }
        else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
            params.n_gpu_layers = std::stoi(argv[i]);
#else
            LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                        "See main README.md for information on enabling GPU BLAS support",
                        {{"n_gpu_layers", params.n_gpu_layers}});
#endif
        }
        else if (arg == "--tensor-split" || arg == "-ts")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

            for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device)
            {
                if (i_device < split_arg.size())
                {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else
                {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--low-vram" || arg == "-lv")
        {
#ifdef GGML_USE_CUBLAS
            params.low_vram = true;
#else
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--main-gpu" || arg == "-mg")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
#endif
        }
        else if (arg == "--lora")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter = argv[i];
            params.use_mmap = false;
        }
        else if (arg == "--lora-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        }
        else if (arg == "-v" || arg == "--verbose")
        {
#if SERVER_VERBOSE != 1
            LOG_WARNING("server.cpp is not built with verbose logging.", {});
#else
            server_verbose = true;
#endif
        }
        else if (arg == "--mlock")
        {
            params.use_mlock = true;
        }
        else if (arg == "--no-mmap")
        {
            params.use_mmap = false;
        }
        else if (arg == "--embedding")
        {
            params.embedding = true;
        }
        else
        {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (invalid_param)
    {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}

static json format_generation_settings(llama_server_context &llama)
{
    const auto eos_bias = llama.params.logit_bias.find(llama_token_eos());
    const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
                            eos_bias->second < 0.0f && std::isinf(eos_bias->second);

    return json{
        {"n_ctx", llama.params.n_ctx},
        {"model", llama.params.model_alias},
        {"seed", llama.params.seed},
        {"temp", llama.params.temp},
        {"top_k", llama.params.top_k},
        {"top_p", llama.params.top_p},
        {"tfs_z", llama.params.tfs_z},
        {"typical_p", llama.params.typical_p},
        {"repeat_last_n", llama.params.repeat_last_n},
        {"repeat_penalty", llama.params.repeat_penalty},
        {"presence_penalty", llama.params.presence_penalty},
        {"frequency_penalty", llama.params.frequency_penalty},
        {"mirostat", llama.params.mirostat},
        {"mirostat_tau", llama.params.mirostat_tau},
        {"mirostat_eta", llama.params.mirostat_eta},
        {"penalize_nl", llama.params.penalize_nl},
        {"stop", llama.params.antiprompt},
        {"n_predict", llama.params.n_predict},
        {"n_keep", llama.params.n_keep},
        {"ignore_eos", ignore_eos},
        {"stream", llama.stream},
        {"logit_bias", llama.params.logit_bias},
        {"n_probs", llama.params.n_probs},
    };
}

static json format_embedding_response(llama_server_context &llama)
{
    return json{
        {"embedding", llama.getEmbedding()},
    };
}

static json format_timings(llama_server_context &llama)
{
    const auto timings = llama_get_timings(llama.ctx);

    assert(timings.n_eval == llama.num_tokens_predicted);

    return json{
        {"prompt_n", timings.n_p_eval},
        {"prompt_ms", timings.t_p_eval_ms},
        {"prompt_per_token_ms", timings.t_p_eval_ms / timings.n_p_eval},
        {"prompt_per_second", 1e3 / timings.t_p_eval_ms * timings.n_p_eval},

        {"predicted_n", timings.n_eval},
        {"predicted_ms", timings.t_eval_ms},
        {"predicted_per_token_ms", timings.t_eval_ms / timings.n_eval},
        {"predicted_per_second", 1e3 / timings.t_eval_ms * timings.n_eval},
    };
}

static json format_final_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", true},
        {"model", llama.params.model_alias},
        {"tokens_predicted", llama.num_tokens_predicted},
        {"tokens_evaluated", llama.num_prompt_tokens},
        {"generation_settings", format_generation_settings(llama)},
        {"prompt", llama.params.prompt},
        {"truncated", llama.truncated},
        {"stopped_eos", llama.stopped_eos},
        {"stopped_word", llama.stopped_word},
        {"stopped_limit", llama.stopped_limit},
        {"stopping_word", llama.stopping_word},
        {"tokens_cached", llama.n_past},
        {"timings", format_timings(llama)},
    };

    if (llama.params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_partial_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", false},
    };

    if (llama.params.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json{
        {"tokens", tokens}};
}

static void parse_options_completion(const json &body, llama_server_context &llama)
{
    gpt_params default_params;

    llama.stream = body.value("stream", false);
    llama.params.n_predict = body.value("n_predict", default_params.n_predict);
    llama.params.top_k = body.value("top_k", default_params.top_k);
    llama.params.top_p = body.value("top_p", default_params.top_p);
    llama.params.tfs_z = body.value("tfs_z", default_params.tfs_z);
    llama.params.typical_p = body.value("typical_p", default_params.typical_p);
    llama.params.repeat_last_n = body.value("repeat_last_n", default_params.repeat_last_n);
    llama.params.temp = body.value("temperature", default_params.temp);
    llama.params.repeat_penalty = body.value("repeat_penalty", default_params.repeat_penalty);
    llama.params.presence_penalty = body.value("presence_penalty", default_params.presence_penalty);
    llama.params.frequency_penalty = body.value("frequency_penalty", default_params.frequency_penalty);
    llama.params.mirostat = body.value("mirostat", default_params.mirostat);
    llama.params.mirostat_tau = body.value("mirostat_tau", default_params.mirostat_tau);
    llama.params.mirostat_eta = body.value("mirostat_eta", default_params.mirostat_eta);
    llama.params.penalize_nl = body.value("penalize_nl", default_params.penalize_nl);
    llama.params.n_keep = body.value("n_keep", default_params.n_keep);
    llama.params.seed = body.value("seed", default_params.seed);
    llama.params.prompt = body.value("prompt", default_params.prompt);
    llama.params.n_probs = body.value("n_probs", default_params.n_probs);

    llama.params.logit_bias.clear();
    if (body.value("ignore_eos", false))
    {
        llama.params.logit_bias[llama_token_eos()] = -INFINITY;
    }

    const auto &logit_bias = body.find("logit_bias");
    if (logit_bias != body.end() && logit_bias->is_array())
    {
        const int n_vocab = llama_n_vocab(llama.ctx);
        for (const auto &el : *logit_bias)
        {
            if (el.is_array() && el.size() == 2 && el[0].is_number_integer())
            {
                llama_token tok = el[0].get<llama_token>();
                if (tok >= 0 && tok < n_vocab)
                {
                    if (el[1].is_number())
                    {
                        llama.params.logit_bias[tok] = el[1].get<float>();
                    }
                    else if (el[1].is_boolean() && !el[1].get<bool>())
                    {
                        llama.params.logit_bias[tok] = -INFINITY;
                    }
                }
            }
        }
    }

    llama.params.antiprompt.clear();
    const auto &stop = body.find("stop");
    if (stop != body.end() && stop->is_array())
    {
        for (const auto &word : *stop)
        {
            if (!word.empty())
            {
                llama.params.antiprompt.push_back(word);
            }
        }
    }

    LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
}
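
// Example request body these options are parsed from (illustrative; every field is
// optional and falls back to the defaults above):
// {"prompt": "Hello", "n_predict": 128, "temperature": 0.8, "stop": ["\nUser:"],
//  "logit_bias": [[42, 1.5], [2, false]], "stream": true}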

static void log_server_request(const Request &req, const Response &res)
{
    LOG_INFO("request", {
                            {"remote_addr", req.remote_addr},
                            {"remote_port", req.remote_port},
                            {"status", res.status},
                            {"method", req.method},
                            {"path", req.path},
                            {"params", req.params},
                        });

    LOG_VERBOSE("request", {
                               {"request", req.body},
                               {"response", res.body},
                           });
}

int main(int argc, char **argv)
{
    // own arguments required by this example
    gpt_params params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown")
    {
        params.model_alias = params.model;
    }

    llama_backend_init(params.numa);

    LOG_INFO("build info", {{"build", BUILD_NUMBER},
                            {"commit", BUILD_COMMIT}});
    LOG_INFO("system info", {
                                {"n_threads", params.n_threads},
                                {"total_threads", std::thread::hardware_concurrency()},
                                {"system_info", llama_print_system_info()},
                            });

    // load the model
    if (!llama.loadModel(params))
    {
        return 1;
    }

    Server svr;

    svr.set_default_headers({{"Server", "llama.cpp"},
                             {"Access-Control-Allow-Origin", "*"},
                             {"Access-Control-Allow-Headers", "content-type"}});

    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const Request &, Response &res)
            {
        res.set_content(reinterpret_cast<const char *>(&index_html), index_html_len, "text/html");
        return false; });

    // this is only called if no index.js is found in the public --path
    svr.Get("/index.js", [](const Request &, Response &res)
            {
        res.set_content(reinterpret_cast<const char *>(&index_js), index_js_len, "text/javascript");
        return false; });

    // this is only called if no completion.js is found in the public --path
    svr.Get("/completion.js", [](const Request &, Response &res)
            {
        res.set_content(reinterpret_cast<const char *>(&completion_js), completion_js_len, "application/javascript");
        return false; });

    svr.Post("/completion", [&llama](const Request &req, Response &res)
             {
        auto lock = llama.lock();

        llama.rewind();
        llama_reset_timings(llama.ctx);

        parse_options_completion(json::parse(req.body), llama);

        llama.loadPrompt();
        llama.beginCompletion();

        if (!llama.stream) {
            size_t stop_pos = std::string::npos;

            while (llama.has_next_token) {
                const completion_token_output token_with_probs = llama.doCompletion();
                const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);

                stop_pos = llama.findStoppingStrings(llama.generated_text,
                    token_text.size(), STOP_FULL);
            }

            if (stop_pos == std::string::npos) {
                stop_pos = llama.findStoppingStrings(llama.generated_text, 0, STOP_PARTIAL);
            }
            if (stop_pos != std::string::npos) {
                llama.generated_text.erase(llama.generated_text.begin() + stop_pos,
                    llama.generated_text.end());
            }

            const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);

            llama_print_timings(llama.ctx);

            res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace),
                            "application/json");
        } else {
            const auto chunked_content_provider = [&](size_t, DataSink & sink) {
                size_t sent_count = 0;
                size_t sent_token_probs_index = 0;

                while (llama.has_next_token) {
                    const completion_token_output token_with_probs = llama.doCompletion();
                    const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);
                    if (llama.multibyte_pending > 0) {
                        continue;
                    }

                    size_t pos = std::min(sent_count, llama.generated_text.size());

                    const std::string str_test = llama.generated_text.substr(pos);
                    size_t stop_pos =
                        llama.findStoppingStrings(str_test, token_text.size(), STOP_FULL);
                    if (stop_pos != std::string::npos) {
                        llama.generated_text.erase(
                            llama.generated_text.begin() + pos + stop_pos,
                            llama.generated_text.end());
                        pos = std::min(sent_count, llama.generated_text.size());
                    } else {
                        stop_pos = llama.findStoppingStrings(str_test, token_text.size(),
                            STOP_PARTIAL);
                    }

                    const std::string to_send = llama.generated_text.substr(pos, stop_pos);
                    sent_count += to_send.size();

                    std::vector<completion_token_output> probs_output = {};

                    if (llama.params.n_probs > 0) {
                        const std::vector<llama_token> to_send_toks = llama_tokenize(llama.ctx, to_send, false);
                        size_t probs_pos = std::min(sent_token_probs_index, llama.generated_token_probs.size());
                        size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama.generated_token_probs.size());
                        if (probs_pos < probs_stop_pos) {
                            probs_output = std::vector<completion_token_output>(llama.generated_token_probs.begin() + probs_pos, llama.generated_token_probs.begin() + probs_stop_pos);
                        }
                        sent_token_probs_index = probs_stop_pos;
                    }

                    const json data = llama.has_next_token
                                          ? format_partial_response(llama, to_send, probs_output)
                                          // Generation is done, send extra information.
                                          : format_final_response(llama, to_send, llama.generated_token_probs);

                    const std::string str =
                        "data: " +
                        data.dump(-1, ' ', false, json::error_handler_t::replace) +
                        "\n\n";

                    LOG_VERBOSE("data stream", {
                        { "to_send", str }
                    });

                    if (!sink.write(str.data(), str.size())) {
                        LOG_VERBOSE("stream closed", {});
                        llama_print_timings(llama.ctx);
                        return false;
                    }
                }

                llama_print_timings(llama.ctx);
                sink.done();
                return true;
            };
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider);
        } });
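
    // Illustrative use of the endpoint above (assuming the default host and port):
    //   curl http://127.0.0.1:8080/completion -d '{"prompt": "Hello", "n_predict": 16}'
    // With "stream": true the response is a text/event-stream of chunks shaped like
    //   data: {"content": " world", "stop": false}
    // ending with a final chunk where "stop" is true plus generation statistics.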

    svr.Get("/model.json", [&llama](const Request &, Response &res)
            {
        const json data = format_generation_settings(llama);
        return res.set_content(data.dump(), "application/json"); });

    svr.Options(R"(/.*)", [](const Request &, Response &res)
                { return res.set_content("", "application/json"); });

    svr.Post("/tokenize", [&llama](const Request &req, Response &res)
             {
        auto lock = llama.lock();

        const json body = json::parse(req.body);
        const std::string content = body.value("content", "");
        const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json"); });

    svr.Post("/embedding", [&llama](const Request &req, Response &res)
             {
        auto lock = llama.lock();

        const json body = json::parse(req.body);

        llama.rewind();
        llama_reset_timings(llama.ctx);
        llama.params.prompt = body.value("content", "");
        llama.params.n_predict = 0;
        llama.loadPrompt();
        llama.beginCompletion();
        llama.doCompletion();

        const json data = format_embedding_response(llama);
        return res.set_content(data.dump(), "application/json"); });

    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const Request &, Response &res, std::exception_ptr ep)
                              {
        const auto * fmt = "500 Internal Server Error\n%s";
        char buf[BUFSIZ];
        try {
            std::rethrow_exception(std::move(ep));
        } catch (std::exception & e) {
            snprintf(buf, sizeof(buf), fmt, e.what());
        } catch (...) {
            snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
        }
        res.set_content(buf, "text/plain");
        res.status = 500; });

    svr.set_error_handler([](const Request &, Response &res)
                          {
        res.set_content("File Not Found", "text/plain");
        res.status = 404; });

    // set timeouts and change hostname and port
    svr.set_read_timeout(sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port))
    {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    // to make it ctrl+clickable:
    fprintf(stdout, "\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);

    LOG_INFO("HTTP server listening", {
                                          {"hostname", sparams.hostname},
                                          {"port", sparams.port},
                                      });

    if (!svr.listen_after_bind())
    {
        return 1;
    }

    llama_backend_free();

    return 0;
}