#include "common.h"
#include "llama.h"
#include "build-info.h"

// single thread
#define CPPHTTPLIB_THREAD_POOL_COUNT 1

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif

#include "httplib.h"
#include "json.hpp"

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

using namespace httplib;
using json = nlohmann::json;
struct server_params {
    std::string hostname = "127.0.0.1";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
};
static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}
    return i;
}
enum stop_type {
    STOP_FULL,
    STOP_PARTIAL,
};
static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() &&
           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}
static size_t find_partial_stop_string(const std::string & stop,
                                       const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }
    return std::string::npos;
}
template<class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += llama_token_to_str(ctx, *begin);
    }
    return ret;
}
static void server_log(const char * level, const char * function, int line,
                       const char * message, const nlohmann::ordered_json & extra) {
    nlohmann::ordered_json log {
        { "timestamp", time(nullptr) },
        { "level",     level },
        { "function",  function },
        { "line",      line },
        { "message",   message },
    };

    if (!extra.empty()) {
        log.merge_patch(extra);
    }

    const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
    fprintf(stdout, "%.*s\n", (int)str.size(), str.data());
    fflush(stdout);
}
static bool server_verbose = false;

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                            \
    do {                                                                 \
        if (server_verbose) {                                            \
            server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                                \
    } while (0)
#endif

#define LOG_ERROR(MSG, ...)   server_log("ERROR",   __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(MSG, ...)    server_log("INFO",    __func__, __LINE__, MSG, __VA_ARGS__)
struct llama_server_context {
    bool stream = false;
    bool has_next_token = false;
    std::string generated_text;

    size_t num_tokens_predicted = 0;
    size_t n_past = 0;
    size_t n_remain = 0;

    std::vector<llama_token> embd;
    std::vector<llama_token> last_n_tokens;

    llama_context * ctx = nullptr;
    gpt_params params;

    bool truncated = false;
    bool stopped_eos = false;
    bool stopped_word = false;
    bool stopped_limit = false;
    std::string stopping_word;
    int32_t multibyte_pending = 0;

    ~llama_server_context() {
        if (ctx) {
            llama_free(ctx);
            ctx = nullptr;
        }
    }
    void rewind() {
        params.antiprompt.clear();
        num_tokens_predicted = 0;
        generated_text = "";
        generated_text.reserve(params.n_ctx);
        truncated = false;
        stopped_eos = false;
        stopped_word = false;
        stopped_limit = false;
        stopping_word = "";
        multibyte_pending = 0;
        n_remain = 0;
        n_past = 0;
    }
    bool loadModel(const gpt_params & params_) {
        params = params_;
        ctx = llama_init_from_gpt_params(params);
        if (ctx == nullptr) {
            LOG_ERROR("unable to load model", { { "model", params_.model } });
            return false;
        }

        last_n_tokens.resize(params.n_ctx);
        std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
        return true;
    }
    void loadPrompt() {
        params.prompt.insert(0, 1, ' '); // always add a first space
        std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);

        if (params.n_keep < 0) {
            params.n_keep = (int)prompt_tokens.size();
        }
        params.n_keep = std::min(params.n_ctx - 4, params.n_keep);

        // if input prompt is too big, truncate like normal
        if (prompt_tokens.size() >= (size_t)params.n_ctx) {
            const int n_left = (params.n_ctx - params.n_keep) / 2;
            std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
            const int erased_blocks = (prompt_tokens.size() - params.n_keep - n_left - 1) / n_left;
            new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
            std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), last_n_tokens.begin());

            LOG_VERBOSE("input truncated", {
                { "n_ctx", params.n_ctx },
                { "n_keep", params.n_keep },
                { "n_left", n_left },
                { "new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend()) },
            });

            truncated = true;
            prompt_tokens = new_tokens;
        } else {
            const size_t ps = prompt_tokens.size();
            std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
            std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
        }

        // compare the evaluated prompt with the new prompt
        n_past = common_part(embd, prompt_tokens);
        embd = prompt_tokens;
        if (n_past == prompt_tokens.size()) {
            // we have to evaluate at least 1 token to generate logits.
            n_past--;
        }

        LOG_VERBOSE("prompt ingested", {
            { "n_past", n_past },
            { "cached", tokens_to_str(ctx, embd.cbegin(), embd.cbegin() + n_past) },
            { "to_eval", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend()) },
        });

        has_next_token = true;
    }
    void beginCompletion() {
        // number of tokens remaining to predict
        n_remain = params.n_predict;
        llama_set_rng_seed(ctx, params.seed);
    }
    llama_token nextToken() {
        llama_token result = -1;

        if (embd.size() >= (size_t)params.n_ctx) {
            // Reset context
            const int n_left = (params.n_ctx - params.n_keep) / 2;

            std::vector<llama_token> new_tokens(embd.begin(), embd.begin() + params.n_keep);
            new_tokens.insert(new_tokens.end(), embd.end() - n_left, embd.end());
            embd = new_tokens;
            n_past = params.n_keep;
            truncated = true;
            LOG_VERBOSE("input truncated", {
                { "n_ctx", params.n_ctx },
                { "n_keep", params.n_keep },
                { "n_left", n_left },
                { "new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend()) },
            });
        }

        while (n_past < embd.size()) {
            int n_eval = (int)embd.size() - n_past;
            if (n_eval > params.n_batch) {
                n_eval = params.n_batch;
            }
            if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads)) {
                LOG_ERROR("failed to eval", {
                    { "n_eval", n_eval },
                    { "n_past", n_past },
                    { "n_threads", params.n_threads },
                    { "embd", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend()) },
                });
                has_next_token = false;
                return result;
            }
            n_past += n_eval;
        }

        // out of user input, sample next token
        const float temp = params.temp;
        const int32_t top_k = params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
        const float top_p = params.top_p;
        const float tfs_z = params.tfs_z;
        const float typical_p = params.typical_p;
        const int32_t repeat_last_n = params.repeat_last_n < 0 ? params.n_ctx : params.repeat_last_n;
        const float repeat_penalty = params.repeat_penalty;
        const float alpha_presence = params.presence_penalty;
        const float alpha_frequency = params.frequency_penalty;
        const int mirostat = params.mirostat;
        const float mirostat_tau = params.mirostat_tau;
        const float mirostat_eta = params.mirostat_eta;
        const bool penalize_nl = params.penalize_nl;
        llama_token id = 0;

        {
            auto * logits = llama_get_logits(ctx);
            auto n_vocab = llama_n_vocab(ctx);

            // Apply params.logit_bias map
            for (const auto & it : params.logit_bias) {
                logits[it.first] += it.second;
            }

            std::vector<llama_token_data> candidates;
            candidates.reserve(n_vocab);
            for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
                candidates.emplace_back(llama_token_data{ token_id, logits[token_id], 0.0f });
            }

            llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

            // Apply penalties
            float nl_logit = logits[llama_token_nl()];
            auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), params.n_ctx);
            llama_sample_repetition_penalty(ctx, &candidates_p,
                last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                last_n_repeat, repeat_penalty);
            llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                last_n_repeat, alpha_frequency, alpha_presence);
            if (!penalize_nl) {
                logits[llama_token_nl()] = nl_logit;
            }

            if (temp <= 0) {
                // Greedy sampling
                id = llama_sample_token_greedy(ctx, &candidates_p);
            } else {
                if (mirostat == 1) {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    const int mirostat_m = 100;
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                } else if (mirostat == 2) {
                    static float mirostat_mu = 2.0f * mirostat_tau;
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    id = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                } else {
                    // Temperature sampling
                    llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
                    llama_sample_typical(ctx, &candidates_p, typical_p, 1);
                    llama_sample_top_p(ctx, &candidates_p, top_p, 1);
                    llama_sample_top_k(ctx, &candidates_p, top_k, 1);
                    llama_sample_temperature(ctx, &candidates_p, temp);
                    id = llama_sample_token(ctx, &candidates_p);
                }
            }
            last_n_tokens.erase(last_n_tokens.begin());
            last_n_tokens.push_back(id);
            num_tokens_predicted++;
        }

        // add it to the context
        embd.push_back(id);
        result = id;
        // decrement remaining sampling budget
        --n_remain;

        if (!embd.empty() && embd.back() == llama_token_eos()) {
            //stopping_word = llama_token_to_str(ctx, embd.back());
            has_next_token = false;
            stopped_eos = true;
            LOG_VERBOSE("eos token found", {});
            return result;
        }

        has_next_token = params.n_predict == -1 || n_remain != 0;
        return result;
    }
    size_t findStoppingStrings(const std::string & text, const size_t last_token_size,
                               const stop_type type) {
        size_t stop_pos = std::string::npos;
        for (const std::string & word : params.antiprompt) {
            size_t pos;
            if (type == STOP_FULL) {
                const size_t tmp = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
                pos = text.find(word, from_pos);
            } else {
                pos = find_partial_stop_string(word, text);
            }
            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos)) {
                if (type == STOP_FULL) {
                    stopping_word = word;
                    stopped_word = true;
                    has_next_token = false;
                }
                stop_pos = pos;
            }
        }
        return stop_pos;
    }
    std::string doCompletion() {
        const llama_token token = nextToken();

        const std::string token_text = token == -1 ? "" : llama_token_to_str(ctx, token);
        generated_text += token_text;

        if (multibyte_pending > 0) {
            multibyte_pending -= token_text.size();
        } else if (token_text.size() == 1) {
            const char c = token_text[0];
            // 2-byte characters: 110xxxxx 10xxxxxx
            if ((c & 0xE0) == 0xC0) {
                multibyte_pending = 1;
            // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
            } else if ((c & 0xF0) == 0xE0) {
                multibyte_pending = 2;
            // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
            } else if ((c & 0xF8) == 0xF0) {
                multibyte_pending = 3;
            } else {
                multibyte_pending = 0;
            }
        }

        if (multibyte_pending > 0 && !has_next_token) {
            has_next_token = true;
            n_remain++;
        }

        if (!has_next_token && n_remain == 0) {
            stopped_limit = true;
        }

        LOG_VERBOSE("next token", {
            { "token", token },
            { "token_text", llama_token_to_str(ctx, token) },
            { "has_next_token", has_next_token },
            { "n_remain", n_remain },
            { "num_tokens_predicted", num_tokens_predicted },
            { "stopped_eos", stopped_eos },
            { "stopped_word", stopped_word },
            { "stopped_limit", stopped_limit },
            { "stopping_word", stopping_word },
        });

        return token_text;
    }
};
static void server_print_usage(const char * argv0, const gpt_params & params,
                               const server_params & sparams) {
    fprintf(stderr, "usage: %s [options]\n", argv0);
    fprintf(stderr, "\n");
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help            show this help message and exit\n");
    fprintf(stderr, "  -v, --verbose         verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
    fprintf(stderr, "  -c N, --ctx-size N    size of the prompt context (default: %d)\n", params.n_ctx);
    fprintf(stderr, "  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch);
    fprintf(stderr, "  --memory-f32          use f32 instead of f16 for memory key+value (default: disabled)\n");
    fprintf(stderr, "                        not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_mlock_supported()) {
        fprintf(stderr, "  --mlock               force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_mmap_supported()) {
        fprintf(stderr, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
    fprintf(stderr, "  -ngl N, --n-gpu-layers N\n");
    fprintf(stderr, "                        number of layers to store in VRAM\n");
    fprintf(stderr, "  -ts SPLIT, --tensor-split SPLIT\n");
    fprintf(stderr, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    fprintf(stderr, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n");
    fprintf(stderr, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n");
#endif
    fprintf(stderr, "  -m FNAME, --model FNAME\n");
    fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
    fprintf(stderr, "  -a ALIAS, --alias ALIAS\n");
    fprintf(stderr, "                        set an alias for the model, will be added as `model` field in completion response\n");
    fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
    fprintf(stderr, "  --lora-base FNAME     optional model to use as a base for the layers modified by the LoRA adapter\n");
    fprintf(stderr, "  --host                ip address to listen (default: %s)\n", sparams.hostname.c_str());
    fprintf(stderr, "  --port PORT           port to listen (default: %d)\n", sparams.port);
    fprintf(stderr, "  -to N, --timeout N    server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    fprintf(stderr, "\n");
}
static void server_params_parse(int argc, char ** argv, server_params & sparams,
                                gpt_params & params) {
    gpt_params default_params;
    server_params default_sparams;
    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg == "--port") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        } else if (arg == "--host") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        } else if (arg == "--timeout" || arg == "-to") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            sparams.read_timeout = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        } else if (arg == "-m" || arg == "--model") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        } else if (arg == "-a" || arg == "--alias") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        } else if (arg == "-h" || arg == "--help") {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        } else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        } else if (arg == "--memory-f32" || arg == "--memory_f32") {
            params.memory_f16 = false;
        } else if (arg == "--threads" || arg == "-t") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        } else if (arg == "-b" || arg == "--batch-size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
            params.n_gpu_layers = std::stoi(argv[i]);
#else
            LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                        "See main README.md for information on enabling GPU BLAS support", { { "n_gpu_layers", params.n_gpu_layers } });
#endif
        }
        else if (arg == "--tensor-split" || arg == "-ts") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{ R"([,/]+)" };
            std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
            std::vector<std::string> split_arg{ it, {} };
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

            for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device) {
                if (i_device < split_arg.size()) {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                } else {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.", {});
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--low-vram" || arg == "-lv") {
#ifdef GGML_USE_CUBLAS
            params.low_vram = true;
#else
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
#endif // GGML_USE_CUBLAS
        }
        else if (arg == "--main-gpu" || arg == "-mg") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
#ifdef GGML_USE_CUBLAS
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
#endif
        } else if (arg == "--lora") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.lora_adapter = argv[i];
            params.use_mmap = false;
        } else if (arg == "--lora-base") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        } else if (arg == "-v" || arg == "--verbose") {
#if SERVER_VERBOSE != 1
            LOG_WARNING("server.cpp is not built with verbose logging.", {});
#else
            server_verbose = true;
#endif
        } else if (arg == "--mlock") {
            params.use_mlock = true;
        } else if (arg == "--no-mmap") {
            params.use_mmap = false;
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}
static json format_generation_settings(llama_server_context & llama) {
    const auto eos_bias = llama.params.logit_bias.find(llama_token_eos());
    const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
                            eos_bias->second < 0.0f && std::isinf(eos_bias->second);

    return json {
        { "seed", llama.params.seed },
        { "temp", llama.params.temp },
        { "top_k", llama.params.top_k },
        { "top_p", llama.params.top_p },
        { "tfs_z", llama.params.tfs_z },
        { "typical_p", llama.params.typical_p },
        { "repeat_last_n", llama.params.repeat_last_n },
        { "repeat_penalty", llama.params.repeat_penalty },
        { "presence_penalty", llama.params.presence_penalty },
        { "frequency_penalty", llama.params.frequency_penalty },
        { "mirostat", llama.params.mirostat },
        { "mirostat_tau", llama.params.mirostat_tau },
        { "mirostat_eta", llama.params.mirostat_eta },
        { "penalize_nl", llama.params.penalize_nl },
        { "stop", llama.params.antiprompt },
        { "n_predict", llama.params.n_predict },
        { "n_keep", llama.params.n_keep },
        { "ignore_eos", ignore_eos },
        { "stream", llama.stream },
        { "logit_bias", llama.params.logit_bias },
    };
}
static json format_final_response(llama_server_context & llama, const std::string & content) {
    return json {
        { "content", content },
        { "stop", true },
        { "model", llama.params.model_alias },
        { "tokens_predicted", llama.num_tokens_predicted },
        { "generation_settings", format_generation_settings(llama) },
        { "prompt", llama.params.prompt },
        { "truncated", llama.truncated },
        { "stopped_eos", llama.stopped_eos },
        { "stopped_word", llama.stopped_word },
        { "stopped_limit", llama.stopped_limit },
        { "stopping_word", llama.stopping_word },
    };
}
static json format_partial_response(const std::string & content) {
    return json {
        { "content", content },
        { "stop", false },
    };
}

static json format_tokenizer_response(const std::vector<llama_token> & tokens) {
    return json {
        { "tokens", tokens }
    };
}
static void parse_options_completion(const json & body, llama_server_context & llama) {
    gpt_params default_params;

    llama.stream = body.value("stream", false);
    llama.params.n_predict = body.value("n_predict", default_params.n_predict);
    llama.params.top_k = body.value("top_k", default_params.top_k);
    llama.params.top_p = body.value("top_p", default_params.top_p);
    llama.params.tfs_z = body.value("tfs_z", default_params.tfs_z);
    llama.params.typical_p = body.value("typical_p", default_params.typical_p);
    llama.params.repeat_last_n = body.value("repeat_last_n", default_params.repeat_last_n);
    llama.params.temp = body.value("temperature", default_params.temp);
    llama.params.repeat_penalty = body.value("repeat_penalty", default_params.repeat_penalty);
    llama.params.presence_penalty = body.value("presence_penalty", default_params.presence_penalty);
    llama.params.frequency_penalty = body.value("frequency_penalty", default_params.frequency_penalty);
    llama.params.mirostat = body.value("mirostat", default_params.mirostat);
    llama.params.mirostat_tau = body.value("mirostat_tau", default_params.mirostat_tau);
    llama.params.mirostat_eta = body.value("mirostat_eta", default_params.mirostat_eta);
    llama.params.penalize_nl = body.value("penalize_nl", default_params.penalize_nl);
    llama.params.n_keep = body.value("n_keep", default_params.n_keep);
    llama.params.seed = body.value("seed", default_params.seed);
    llama.params.prompt = body.value("prompt", default_params.prompt);

    llama.params.logit_bias.clear();
    if (body.value("ignore_eos", false)) {
        llama.params.logit_bias[llama_token_eos()] = -INFINITY;
    }

    const auto & logit_bias = body.find("logit_bias");
    if (logit_bias != body.end() && logit_bias->is_array()) {
        const int n_vocab = llama_n_vocab(llama.ctx);
        for (const auto & el : *logit_bias) {
            if (el.is_array() && el.size() == 2 && el[0].is_number_integer()) {
                llama_token tok = el[0].get<llama_token>();
                if (tok >= 0 && tok < n_vocab) {
                    if (el[1].is_number()) {
                        llama.params.logit_bias[tok] = el[1].get<float>();
                    } else if (el[1].is_boolean() && !el[1].get<bool>()) {
                        llama.params.logit_bias[tok] = -INFINITY;
                    }
                }
            }
        }
    }

    llama.params.antiprompt.clear();
    const auto & stop = body.find("stop");
    if (stop != body.end() && stop->is_array()) {
        for (const auto & word : *stop) {
            if (!word.empty()) {
                llama.params.antiprompt.push_back(word);
            }
        }
    }

    LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
}
static void log_server_request(const Request & req, const Response & res) {
    LOG_INFO("request", {
        { "remote_addr", req.remote_addr },
        { "remote_port", req.remote_port },
        { "status", res.status },
        { "path", req.path },
        { "request", req.body },
        { "response", res.body },
    });
}
int main(int argc, char ** argv) {
    // own arguments required by this example
    gpt_params params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown") {
        params.model_alias = params.model;
    }

    llama_init_backend();

    LOG_INFO("build info", {
        { "build", BUILD_NUMBER },
        { "commit", BUILD_COMMIT }
    });
    LOG_INFO("system info", {
        { "n_threads", params.n_threads },
        { "total_threads", std::thread::hardware_concurrency() },
        { "system_info", llama_print_system_info() },
    });

    // load the model
    if (!llama.loadModel(params)) {
        return 1;
    }

    Server svr;

    svr.set_default_headers({
        { "Access-Control-Allow-Origin", "*" },
        { "Access-Control-Allow-Headers", "content-type" }
    });

    svr.Get("/", [](const Request &, Response & res) {
        res.set_content("<h1>llama.cpp server works</h1>", "text/html");
    });
    svr.Post("/completion", [&llama](const Request & req, Response & res) {
        llama.rewind();
        llama_reset_timings(llama.ctx);

        parse_options_completion(json::parse(req.body), llama);

        llama.loadPrompt();
        llama.beginCompletion();

        if (!llama.stream) {
            size_t stop_pos = std::string::npos;

            while (llama.has_next_token) {
                const std::string token_text = llama.doCompletion();

                stop_pos = llama.findStoppingStrings(llama.generated_text,
                    token_text.size(), STOP_FULL);
            }

            if (stop_pos == std::string::npos) {
                stop_pos = llama.findStoppingStrings(llama.generated_text, 0, STOP_PARTIAL);
            }
            if (stop_pos != std::string::npos) {
                llama.generated_text.erase(llama.generated_text.begin() + stop_pos,
                    llama.generated_text.end());
            }

            const json data = format_final_response(llama, llama.generated_text);

            llama_print_timings(llama.ctx);

            res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace),
                            "application/json");
        } else {
            const auto chunked_content_provider = [&](size_t, DataSink & sink) {
                size_t sent_count = 0;

                while (llama.has_next_token) {
                    const std::string token_text = llama.doCompletion();
                    if (llama.multibyte_pending > 0) {
                        continue;
                    }

                    size_t pos = std::min(sent_count, llama.generated_text.size());

                    const std::string str_test = llama.generated_text.substr(pos);
                    size_t stop_pos =
                        llama.findStoppingStrings(str_test, token_text.size(), STOP_FULL);
                    if (stop_pos != std::string::npos) {
                        llama.generated_text.erase(
                            llama.generated_text.begin() + pos + stop_pos,
                            llama.generated_text.end());
                        pos = std::min(sent_count, llama.generated_text.size());
                    } else {
                        stop_pos = llama.findStoppingStrings(str_test, token_text.size(),
                            STOP_PARTIAL);
                    }

                    const std::string to_send = llama.generated_text.substr(pos, stop_pos);
                    sent_count += to_send.size();

                    const json data = llama.has_next_token
                        ? format_partial_response(to_send)
                        // Generation is done, send extra information.
                        : format_final_response(llama, to_send);

                    const std::string str =
                        "data: " +
                        data.dump(-1, ' ', false, json::error_handler_t::replace) +
                        "\n\n";

                    LOG_VERBOSE("data stream", {
                        { "to_send", str }
                    });

                    if (!sink.write(str.data(), str.size())) {
                        LOG_VERBOSE("stream closed", {});
                        llama_print_timings(llama.ctx);
                        return false;
                    }
                }

                llama_print_timings(llama.ctx);
                sink.done();
                return true;
            };
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider);
        }
    });
    svr.Options(R"(/.*)", [](const Request &, Response & res) {
        return res.set_content("", "application/json");
    });

    svr.Post("/tokenize", [&llama](const Request & req, Response & res) {
        const json body = json::parse(req.body);
        const std::string content = body["content"].get<std::string>();
        const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json");
    });

    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const Request &, Response & res, std::exception_ptr ep) {
        const auto * fmt = "500 Internal Server Error\n%s";
        char buf[BUFSIZ];
        try {
            std::rethrow_exception(std::move(ep));
        } catch (std::exception & e) {
            snprintf(buf, sizeof(buf), fmt, e.what());
        } catch (...) {
            snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
        }
        res.set_content(buf, "text/plain");
        res.status = 500;
    });

    // set timeouts and change hostname and port
    svr.set_read_timeout(sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port)) {
        LOG_ERROR("couldn't bind to server socket", {
            { "hostname", sparams.hostname },
            { "port", sparams.port },
        });
        return 1;
    }

    LOG_INFO("HTTP server listening", {
        { "hostname", sparams.hostname },
        { "port", sparams.port },
    });

    if (!svr.listen_after_bind()) {
        return 1;
    }

    return 0;
}