
#pragma once

#include "llama.h"
#include "common.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include <cctype>
#include <cstdio>
#include <ctime>
#include <random>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::ordered_json;

// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
enum error_type {
    ERROR_TYPE_INVALID_REQUEST,
    ERROR_TYPE_AUTHENTICATION,
    ERROR_TYPE_SERVER,
    ERROR_TYPE_NOT_FOUND,
    ERROR_TYPE_PERMISSION,
    ERROR_TYPE_UNAVAILABLE,   // custom error
    ERROR_TYPE_NOT_SUPPORTED, // custom error
};
extern bool server_verbose;
extern bool server_log_json;

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                         \
    do                                                                \
    {                                                                 \
        if (server_verbose)                                           \
        {                                                             \
            server_log("VERB", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                             \
    } while (0)
#endif

#define LOG_ERROR(  MSG, ...) server_log("ERR",  __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(   MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)

static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra);
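
// Example usage (illustrative, not part of the original file): the logging
// macros take a message plus a json object of extra key/value pairs, e.g.
//
//     LOG_INFO("model loaded", {{"n_ctx", 4096}, {"path", "model.gguf"}});
//
// The values shown here ("n_ctx", "model.gguf") are made up for illustration.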
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fall back to the default value if the key is missing or null
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            std::stringstream ss;
            ss << "Wrong type supplied for parameter '" << key << "'. Expected '" << json(default_value).type_name() << "', using default value.";
            LOG_WARNING(ss.str().c_str(), body);
            return default_value;
        }
    } else {
        return default_value;
    }
}
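
// Example (illustrative):
//
//     json body = json::parse(R"({"temperature": 0.5, "seed": null})");
//     float temp = json_value(body, "temperature", 0.8f); // -> 0.5f
//     int   seed = json_value(body, "seed", -1);          // null falls back to -1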
static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra) {
    std::stringstream ss_tid;
    ss_tid << std::this_thread::get_id();
    json log = json{
        {"tid",       ss_tid.str()},
        {"timestamp", time(nullptr)},
    };

    if (server_log_json) {
        log.merge_patch({
            {"level",    level},
            {"function", function},
            {"line",     line},
            {"msg",      message},
        });

        if (!extra.empty()) {
            log.merge_patch(extra);
        }

        printf("%s\n", log.dump(-1, ' ', false, json::error_handler_t::replace).c_str());
    } else {
        char buf[1024];
        snprintf(buf, sizeof(buf), "%4s [%24s] %s", level, function, message);

        if (!extra.empty()) {
            log.merge_patch(extra);
        }

        std::stringstream ss;
        ss << buf << " |";
        for (const auto & el : log.items()) {
            const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace);
            ss << " " << el.key() << "=" << value;
        }

        const std::string str = ss.str();
        printf("%.*s\n", (int) str.size(), str.data());
    }

    fflush(stdout);
}
//
// chat template utils
//

// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    std::vector<llama_chat_msg> chat;

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];

        std::string role = json_value(curr_msg, "role", std::string(""));

        std::string content;
        if (curr_msg.contains("content")) {
            if (curr_msg["content"].is_string()) {
                content = curr_msg["content"].get<std::string>();
            } else if (curr_msg["content"].is_array()) {
                for (const auto & part : curr_msg["content"]) {
                    if (part.contains("text")) {
                        content += "\n" + part["text"].get<std::string>();
                    }
                }
            } else {
                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
            }
        } else {
            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
        }

        chat.push_back({role, content});
    }

    auto formatted_chat = llama_chat_apply_template(model, tmpl, chat, true);
    LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}});

    return formatted_chat;
}
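
// Example (illustrative): an OAI-style message list such as
//
//     [{"role": "user", "content": "Hello"},
//      {"role": "user", "content": [{"type": "text", "text": "How are you?"}]}]
//
// is flattened to plain role/content pairs before the model's chat template
// (or the explicit `tmpl` string, if non-empty) is applied.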
//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; i < 3; i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}
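
// Example (illustrative): decoding a small base64 payload
//
//     const std::vector<uint8_t> bytes = base64_decode("SGVsbG8="); // {'H','e','l','l','o'}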
//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    std::stringstream chatcmplid;
    chatcmplid << "chatcmpl-" << random_string();

    return chatcmplid.str();
}
//
// other common utils
//

static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static size_t common_part(const std::string & a, const std::string & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}
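
// Example (illustrative): find_partial_stop_string returns the offset in `text`
// at which a prefix of `stop` begins, so generation can hold back those bytes:
//
//     find_partial_stop_string("</s>", "hello</"); // -> 5 (text ends with "</")
//     find_partial_stop_string("</s>", "hello");   // -> std::string::npos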
static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number()) {
                return false;
            }
        }
        return true;
    }
    return false;
}
// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);

    // if the size is 1 and the high bit is set, this is a partial UTF-8 character
    // (size > 1 means it is already a complete piece)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}
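
// Example (illustrative): a token whose piece is a single UTF-8 continuation
// byte, e.g. 0xe6, is rendered as the literal string "byte: \xe6" instead of
// sending invalid UTF-8 to the client.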
struct completion_token_output {
    llama_token tok;
    std::string text_to_send;

    struct token_prob {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
};

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();

    for (const auto & prob : probs) {
        json probs_for_token = json::array();

        for (const auto & p : prob.probs) {
            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }

        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json {
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }

    return out;
}
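
// Resulting shape (illustrative values):
//
//     [{"content": "Hello", "probs": [{"tok_str": "Hello", "prob": 0.91},
//                                     {"tok_str": "Hi",    "prob": 0.07}]}]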
static bool server_sent_event(httplib::DataSink & sink, const char * event, json & data) {
    const std::string str =
        std::string(event) + ": " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n";

    LOG_VERBOSE("data stream", {
        { "to_send", str }
    });

    return sink.write(str.c_str(), str.size());
}
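
// Example (illustrative): server_sent_event(sink, "data", chunk) writes one
// Server-Sent Events frame on the wire:
//
//     data: {"content":"Hi"}\n\n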
//
// OAI utils
//

static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json & body, /* openai api json semantics */
    const std::string & chat_template) {
    json llama_params;

    llama_params["__oaicompat"] = true;

    // Apply chat template to the list of messages
    llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: The response format of this option is not yet OAI-compatible, but it seems no one is really using it; we may need to fix it in the future
    if (body.contains("logprobs")) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs")) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    // This allows the user to pass llama.cpp-specific params like "mirostat", "tfs_z", ... via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}
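
// Example (illustrative): an OAI request body such as
//
//     {"messages": [{"role": "user", "content": "Hi"}],
//      "stop": "</s>", "max_tokens": 64, "mirostat": 2}
//
// becomes llama.cpp params with "prompt" rendered from the chat template,
// "stop" normalized to ["</s>"], and the llama.cpp-specific "mirostat" copied through.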
static json format_final_response_oaicompat(const json & request, json result, const std::string & completion_id, bool streaming = false) {
    bool stopped_word        = result.count("stopped_word") != 0;
    bool stopped_eos         = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
    std::string content      = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"message", json{{"content", content},
                                                       {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res = json {
        {"choices", choices},
        {"created", t},
        {"model",
            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
        {"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }},
        {"id", completion_id}
    };

    if (server_verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    return res;
}
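
// Non-streaming result shape (illustrative values):
//
//     {"choices": [{"finish_reason": "stop", "index": 0,
//                   "message": {"content": "Hi there!", "role": "assistant"}}],
//      "created": 1717000000, "model": "gpt-3.5-turbo-0613",
//      "object": "chat.completion",
//      "usage": {"completion_tokens": 3, "prompt_tokens": 8, "total_tokens": 11},
//      "id": "chatcmpl-..."}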
// return value is vector as there is one case where we might need to generate two responses
static std::vector<json> format_partial_response_oaicompat(json result, const std::string & completion_id) {
    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({result});
    }

    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word   = json_value(result, "stopped_word",  false);
    bool stopped_eos    = json_value(result, "stopped_eos",   false);
    bool stopped_limit  = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content", std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // We have to send this as two updates to conform to openai behavior
                json initial_ret = json{{"choices", json::array({json{
                                            {"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{
                                                {"role", "assistant"}
                                            }}}})},
                                        {"created", t},
                                        {"id", completion_id},
                                        {"model", modelname},
                                        {"object", "chat.completion.chunk"}};

                json second_ret = json{
                                        {"choices", json::array({json{{"finish_reason", nullptr},
                                                                      {"index", 0},
                                                                      {"delta", json{
                                                                          {"content", content}}}
                                                                      }})},
                                        {"created", t},
                                        {"id", completion_id},
                                        {"model", modelname},
                                        {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // Some idiosyncrasy in task processing logic makes several trailing calls
            // with empty content; we ignore these at the callee site.
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                    json{
                        {"content", content},
                    }},
            }});
        }
    }

    json ret = json {
        {"choices", choices},
        {"created", t},
        {"id",      completion_id},
        {"model",   modelname},
        {"object",  "chat.completion.chunk"}
    };

    if (!finish_reason.empty()) {
        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
        int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
        ret.push_back({"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }});
    }

    return std::vector<json>({ret});
}
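
// Example (illustrative): for a first token "Hi" the function returns two
// chunks, matching OpenAI's streaming behavior:
//
//     {"choices": [{"finish_reason": null, "index": 0, "delta": {"role": "assistant"}}], ...}
//     {"choices": [{"finish_reason": null, "index": 0, "delta": {"content": "Hi"}}], ...}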
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (const auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index",     i++},
            {"object",    "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", 0},
            {"total_tokens",  0}
        }},
        {"data", data}
    };

    return res;
}
static json format_tokenizer_response(const std::vector<llama_token> & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}
static json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
    }
    return json {
        {"code",    code},
        {"message", message},
        {"type",    type_str},
    };
}
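
// Example (illustrative):
//
//     format_error_response("model not found", ERROR_TYPE_NOT_FOUND);
//     // -> {"code": 404, "message": "model not found", "type": "not_found_error"}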