utils.hpp

#pragma once

#include "common.h"
#include "log.h"
#include "llama.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include <random>
#include <sstream>
#include <string>
#include <vector>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::ordered_json;

// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
enum error_type {
    ERROR_TYPE_INVALID_REQUEST,
    ERROR_TYPE_AUTHENTICATION,
    ERROR_TYPE_SERVER,
    ERROR_TYPE_NOT_FOUND,
    ERROR_TYPE_PERMISSION,
    ERROR_TYPE_UNAVAILABLE,   // custom error
    ERROR_TYPE_NOT_SUPPORTED, // custom error
};

template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fallback null to default value
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value\n", key.c_str(), json(default_value).type_name());
            return default_value;
        }
    } else {
        return default_value;
    }
}
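
// Usage sketch (illustrative only, not part of the original file): json_value()
// falls back to the default when the key is missing, null, or of the wrong type.
//
//   json  body = json::parse(R"({"temperature": 0.7, "seed": null})");
//   float temp = json_value(body, "temperature", 0.8f); // 0.7
//   int   seed = json_value(body, "seed", -1);          // -1  (null -> default)
//   int   npr  = json_value(body, "n_predict", 128);    // 128 (missing -> default)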

//
// chat template utils
//

// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    std::vector<llama_chat_msg> chat;

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];

        std::string role = json_value(curr_msg, "role", std::string(""));

        std::string content;
        if (curr_msg.contains("content")) {
            if (curr_msg["content"].is_string()) {
                content = curr_msg["content"].get<std::string>();
            } else if (curr_msg["content"].is_array()) {
                for (const auto & part : curr_msg["content"]) {
                    if (part.contains("text")) {
                        content += "\n" + part["text"].get<std::string>();
                    }
                }
            } else {
                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
            }
        } else {
            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
        }

        chat.push_back({role, content});
    }

    const auto formatted_chat = llama_chat_apply_template(model, tmpl, chat, true);
    LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());

    return formatted_chat;
}
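
// Illustrative sketch of the two "content" shapes format_chat() accepts
// (hypothetical messages, mirroring the OpenAI chat format):
//
//   [
//     {"role": "system", "content": "You are a helpful assistant."},
//     {"role": "user",   "content": [{"type": "text", "text": "Hello"},
//                                    {"type": "text", "text": "World"}]}
//   ]
//
// A string is used as-is; an array concatenates every "text" part, each
// prefixed with a newline. Any other type throws.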

//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
             "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
             "abcdefghijklmnopqrstuvwxyz"
             "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}
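
// Usage sketch (illustrative only; the input below is the base64 prefix of a
// PNG header): decoding stops at the first '=' padding or non-base64 character.
//
//   const std::vector<uint8_t> bytes = base64_decode("iVBORw0KGgo=");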

//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}

//
// other common utils
//

static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static size_t common_part(const std::string & a, const std::string & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}
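
// Illustrative sketch (hypothetical values): with the stop string "</s>" and
// generated text ending in a partial match, the function returns the offset at
// which the partial match starts, so the server can withhold those characters
// until the match either completes or fails.
//
//   size_t pos = find_partial_stop_string("</s>", "hello</"); // 5
//   // std::string::npos when no suffix of `text` is a prefix of `stop`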

static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);

    // if the size is 1 and the first bit is set, this is a partial character
    // (size > 1 means it is already a complete, known piece)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

struct completion_token_output {
    llama_token tok;
    std::string text_to_send;

    struct token_prob {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
};

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();

    for (const auto & prob : probs) {
        json probs_for_token = json::array();

        for (const auto & p : prob.probs) {
            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }

        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json {
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }

    return out;
}

static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) {
    const std::string str =
        std::string(event) + ": " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n"; // required by the SSE format: a blank line terminates the event

    LOG_DBG("data stream, to_send: %s", str.c_str());

    return sink.write(str.c_str(), str.size());
}
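
// Illustrative sketch of the frame written to the sink (hypothetical payload):
//
//   server_sent_event(sink, "data", json{{"content", "Hi"}});
//   // writes: data: {"content":"Hi"}\n\n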

//
// OAI utils
//

static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json & body, /* openai api json semantics */
    const std::string & chat_template) {
    json llama_params;

    llama_params["__oaicompat"] = true;

    // Apply chat template to the list of messages
    llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: The response format of this option is not yet OAI-compatible, but it seems that no one is really using it; we may need to fix it in the future
    if (body.contains("logprobs")) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs")) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    // This allows the user to pass llama.cpp-specific params like "mirostat", "tfs_z", ... via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}
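
// Illustrative sketch (hypothetical request) of the resulting mapping: the
// "messages" array becomes the templated "prompt", a string "stop" becomes a
// one-element array, and remaining keys (including llama.cpp-specific ones
// such as "mirostat") are copied through unchanged.
//
//   // in : {"messages": [...], "stop": "</s>", "temperature": 0.2, "mirostat": 2}
//   // out: {"__oaicompat": true, "prompt": "<templated chat>",
//   //       "stop": ["</s>"], "temperature": 0.2, "mirostat": 2, ...}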

static json format_final_response_oaicompat(const json & request, const json & result, const std::string & completion_id, bool streaming = false, bool verbose = false) {
    bool stopped_word        = result.count("stopped_word") != 0;
    bool stopped_eos         = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
    std::string content      = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"message", json{{"content", content},
                                                       {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res = json {
        {"choices", choices},
        {"created", t},
        {"model",
            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
        {"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }},
        {"id", completion_id}
    };

    // extra fields for debugging purposes
    if (verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    return res;
}

// return value is vector as there is one case where we might need to generate two responses
static std::vector<json> format_partial_response_oaicompat(const json & result, const std::string & completion_id) {
    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({result});
    }

    bool first            = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word   = json_value(result, "stopped_word",  false);
    bool stopped_eos    = json_value(result, "stopped_eos",   false);
    bool stopped_limit  = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content",       std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // We have to send this as two updates to conform to openai behavior
                json initial_ret = json{{"choices", json::array({json{
                                        {"finish_reason", nullptr},
                                        {"index", 0},
                                        {"delta", json{
                                            {"role", "assistant"}
                                        }}}})},
                            {"created", t},
                            {"id", completion_id},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                json second_ret = json{
                            {"choices", json::array({json{{"finish_reason", nullptr},
                                                          {"index", 0},
                                                          {"delta", json{
                                                              {"content", content}}}
                                                          }})},
                            {"created", t},
                            {"id", completion_id},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // Some idiosyncrasy in task processing logic makes several trailing calls
            // with empty content; we ignore these at the callee site.
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                    json{
                        {"content", content},
                    }},
            }});
        }
    }

    json ret = json {
        {"choices", choices},
        {"created", t},
        {"id",      completion_id},
        {"model",   modelname},
        {"object",  "chat.completion.chunk"}
    };

    if (!finish_reason.empty()) {
        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
        int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
        ret.push_back({"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }});
    }

    return std::vector<json>({ret});
}

static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (const auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index",     i++},
            {"object",    "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", 0},
            {"total_tokens",  0}
        }},
        {"data", data}
    };

    return res;
}

static bool is_valid_utf8(const std::string & str) {
    const unsigned char * bytes = reinterpret_cast<const unsigned char *>(str.data());
    const unsigned char * end   = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // Invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}
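
// Illustrative checks (hypothetical strings): a truncated multibyte sequence is
// rejected, which lets the server hold back partial UTF-8 output until complete.
//
//   is_valid_utf8("h\xC3\xA9llo"); // true  ("é" is a complete 2-byte sequence)
//   is_valid_utf8("h\xC3");        // false (lead byte without a continuation byte)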

static json format_tokenizer_response(const json & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
    }
    return json {
        {"code",    code},
        {"message", message},
        {"type",    type_str},
    };
}