utils.hpp

#pragma once

#include "llama.h"
#include "common.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include <string>
#include <thread> // std::this_thread::get_id() in server_log()
#include <vector>
#include <sstream>
#include <random>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::ordered_json;

// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
enum error_type {
    ERROR_TYPE_INVALID_REQUEST,
    ERROR_TYPE_AUTHENTICATION,
    ERROR_TYPE_SERVER,
    ERROR_TYPE_NOT_FOUND,
    ERROR_TYPE_PERMISSION,
    ERROR_TYPE_UNAVAILABLE,   // custom error
    ERROR_TYPE_NOT_SUPPORTED, // custom error
};

extern bool server_verbose;
extern bool server_log_json;

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                         \
    do                                                                \
    {                                                                 \
        if (server_verbose)                                           \
        {                                                             \
            server_log("VERB", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                             \
    } while (0)
#endif

#define LOG_ERROR(  MSG, ...) server_log("ERR",  __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(   MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)

static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra);

template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fall back to the default value if the key is missing or null
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            std::stringstream ss;
            ss << "Wrong type supplied for parameter '" << key << "'. Expected '" << json(default_value).type_name() << "', using default value.";
            LOG_WARNING(ss.str().c_str(), body);
            return default_value;
        }
    } else {
        return default_value;
    }
}
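// Usage sketch (illustrative, not part of the original header): json_value() reads an
// optional field with a typed default, so request parsing never throws on a missing key:
//
//   int         n_predict = json_value(body, "n_predict", -1);
//   std::string prompt    = json_value(body, "prompt",    std::string(""));
//
// If the field is present but has the wrong type, a warning is logged and the default is
// returned instead.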
static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra) {
    std::stringstream ss_tid;
    ss_tid << std::this_thread::get_id();
    json log = json{
        {"tid",       ss_tid.str()},
        {"timestamp", time(nullptr)},
    };

    if (server_log_json) {
        log.merge_patch({
            {"level",    level},
            {"function", function},
            {"line",     line},
            {"msg",      message},
        });

        if (!extra.empty()) {
            log.merge_patch(extra);
        }

        printf("%s\n", log.dump(-1, ' ', false, json::error_handler_t::replace).c_str());
    } else {
        char buf[1024];
        snprintf(buf, 1024, "%4s [%24s] %s", level, function, message);

        if (!extra.empty()) {
            log.merge_patch(extra);
        }
        std::stringstream ss;
        ss << buf << " |";
        for (const auto & el : log.items())
        {
            const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace);
            ss << " " << el.key() << "=" << value;
        }

        const std::string str = ss.str();
        printf("%.*s\n", (int)str.size(), str.data());
    }
    fflush(stdout);
}
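// Usage sketch (illustrative): the LOG_* macros above forward to server_log() together
// with the current function name and line number; the field names here are made up for
// the example:
//
//   LOG_INFO("model loaded", {{"n_ctx", 4096}});
//   LOG_ERROR("failed to load model", {{"path", "model.gguf"}});
//
// With server_log_json enabled the record is printed as a single JSON line; otherwise it
// is printed as "LEVEL [function] message | key=value ...".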
//
// chat template utils
//

// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    size_t alloc_size = 0;
    // vector holding all allocated strings to be passed to llama_chat_apply_template
    std::vector<std::string> str(messages.size() * 2);
    std::vector<llama_chat_message> chat(messages.size());

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];
        str[i*2 + 0]    = json_value(curr_msg, "role",    std::string(""));
        str[i*2 + 1]    = json_value(curr_msg, "content", std::string(""));
        alloc_size     += str[i*2 + 1].length();
        chat[i].role    = str[i*2 + 0].c_str();
        chat[i].content = str[i*2 + 1].c_str();
    }

    const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str();
    std::vector<char> buf(alloc_size * 2);

    // run the first time to get the total output length
    int32_t res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());

    // if it turns out that our buffer is too small, we resize it
    if ((size_t) res > buf.size()) {
        buf.resize(res);
        res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());
    }

    const std::string formatted_chat(buf.data(), res);

    LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}});

    return formatted_chat;
}
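// Usage sketch (illustrative; "model" is assumed to be an already-loaded llama_model
// pointer): "messages" follows the OpenAI chat schema, e.g.
//
//   std::vector<json> messages = {
//       {{"role", "system"}, {"content", "You are a helpful assistant."}},
//       {{"role", "user"},   {"content", "Hello!"}},
//   };
//   std::string prompt = format_chat(model, "", messages); // "" -> use the model's own template
//
// The result is the single prompt string produced by llama_chat_apply_template().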
//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
             "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
             "abcdefghijklmnopqrstuvwxyz"
             "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}
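// Usage sketch (illustrative): decoding the payload of a data URI, e.g. an inline image
// sent with a request (the variable names here are made up):
//
//   std::string payload = "aGVsbG8=";                    // base64 for "hello"
//   std::vector<uint8_t> bytes = base64_decode(payload); // -> {'h','e','l','l','o'}
//
// Decoding stops at the first '=' padding character or any non-base64 character.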
//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    std::stringstream chatcmplid;
    chatcmplid << "chatcmpl-" << random_string();

    return chatcmplid.str();
}
//
// other common utils
//

static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static size_t common_part(const std::string & a, const std::string & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}
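// Worked example (illustrative): find_partial_stop_string() returns the position in
// "text" where a prefix of "stop" begins, so a streaming server can hold back text that
// might still turn into a full stop sequence:
//
//   find_partial_stop_string("</s>", "Hello </") == 6   // "</" is a prefix of "</s>"
//   find_partial_stop_string("</s>", "Hello!")   == std::string::npos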
// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);

    // if the size is 1 and first bit is 1, meaning it's a partial character
    // (size > 1 meaning it's already a known token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}
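// Worked example (illustrative): if a token detokenizes to the lone continuation byte
// 0xa9 (an incomplete UTF-8 sequence), the function returns the printable string
// "byte: \xa9" instead of emitting the raw byte; complete pieces pass through unchanged.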
struct completion_token_output {
    llama_token tok;
    std::string text_to_send;

    struct token_prob {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
};

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();

    for (const auto & prob : probs) {
        json probs_for_token = json::array();

        for (const auto & p : prob.probs) {
            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }

        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json {
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }

    return out;
}
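// Shape of the result (illustrative values): one entry per generated token, each carrying
// the candidate tokens and their probabilities:
//
//   [
//     {"content": "Hello", "probs": [{"tok_str": "Hello", "prob": 0.9},
//                                    {"tok_str": "Hi",    "prob": 0.1}]},
//     ...
//   ]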
//
// OAI utils
//

static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json & body, /* openai api json semantics */
    const std::string & chat_template) {
    json llama_params;

    llama_params["__oaicompat"] = true;

    // Map OpenAI parameters to llama.cpp parameters
    //
    // For parameters that are defined by the OpenAI documentation (e.g.
    // temperature), we explicitly specify OpenAI's intended default; we
    // need to do that because sometimes OpenAI disagrees with llama.cpp
    //
    // https://platform.openai.com/docs/api-reference/chat/create
    llama_sampling_params default_sparams;
    llama_params["model"]             = json_value(body, "model", std::string("unknown"));
    llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
    llama_params["logit_bias"]        = json_value(body, "logit_bias", json::object());
    llama_params["n_predict"]         = json_value(body, "max_tokens", -1);
    llama_params["presence_penalty"]  = json_value(body, "presence_penalty", 0.0);
    llama_params["seed"]              = json_value(body, "seed", LLAMA_DEFAULT_SEED);
    llama_params["stream"]            = json_value(body, "stream", false);
    llama_params["temperature"]       = json_value(body, "temperature", 1.0);
    llama_params["top_p"]             = json_value(body, "top_p", 1.0);

    // Apply chat template to the list of messages
    llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: The response format of this option is not yet OAI-compatible, but it seems like no one is really using it; we may need to fix it in the future
    if (body.contains("logprobs")) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs")) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
    for (auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    // This allows the user to use llama.cpp-specific params like "mirostat", "tfs_z", ... via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}
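// Usage sketch (illustrative request body): an OpenAI-style chat request such as
//
//   {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hi"}],
//    "max_tokens": 64, "temperature": 0.7, "stream": true}
//
// is mapped to llama.cpp parameters: "max_tokens" becomes "n_predict", the messages are
// flattened into "prompt" via the chat template, and any extra llama.cpp-specific keys in
// the body are copied through unchanged.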
static json format_final_response_oaicompat(const json & request, json result, const std::string & completion_id, bool streaming = false) {
    bool stopped_word        = result.count("stopped_word") != 0;
    bool stopped_eos         = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
    std::string content      = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"message", json{{"content", content},
                                                       {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res = json {
        {"choices", choices},
        {"created", t},
        {"model",
            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
        {"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }},
        {"id", completion_id}
    };

    if (server_verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    return res;
}
// return value is vector as there is one case where we might need to generate two responses
static std::vector<json> format_partial_response_oaicompat(json result, const std::string & completion_id) {
    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({result});
    }

    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word   = json_value(result, "stopped_word",  false);
    bool stopped_eos    = json_value(result, "stopped_eos",   false);
    bool stopped_limit  = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content",       std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // We have to send this as two updates to conform to openai behavior
                json initial_ret = json{{"choices", json::array({json{
                                            {"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{
                                                {"role", "assistant"}
                                            }}}})},
                        {"created", t},
                        {"id", completion_id},
                        {"model", modelname},
                        {"object", "chat.completion.chunk"}};

                json second_ret = json{
                        {"choices", json::array({json{{"finish_reason", nullptr},
                                                      {"index", 0},
                                                      {"delta", json{
                                                          {"content", content}}}
                                                      }})},
                        {"created", t},
                        {"id", completion_id},
                        {"model", modelname},
                        {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // Some idiosyncrasy in task processing logic makes several trailing calls
            // with empty content, we ignore these at the callee site.
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                    json{
                        {"content", content},
                    }},
            }});
        }
    }

    json ret = json {
        {"choices", choices},
        {"created", t},
        {"id",      completion_id},
        {"model",   modelname},
        {"object",  "chat.completion.chunk"}
    };

    if (!finish_reason.empty()) {
        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
        int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
        ret.push_back({"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }});
    }

    return std::vector<json>({ret});
}
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index",     i++},
            {"object",    "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", 0},
            {"total_tokens",  0}
        }},
        {"data", data}
    };

    return res;
}
static json format_tokenizer_response(const std::vector<llama_token> & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}
static json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
    }
    return json {
        {"code", code},
        {"message", message},
        {"type", type_str},
    };
}
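// Worked example (illustrative): format_error_response("model not found", ERROR_TYPE_NOT_FOUND)
// produces
//
//   {"code": 404, "message": "model not found", "type": "not_found_error"}
//
// where "code" carries the matching HTTP status assigned in the switch above
// (400/401/403/404/500/501/503).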