utils.hpp

#pragma once

#include "common.h"
#include "log.h"
#include "llama.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include <cctype>  // isalnum
#include <cstdint> // uint8_t, int32_t
#include <ctime>   // std::time
#include <random>
#include <sstream>
#include <string>
#include <vector>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::ordered_json;

// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
enum error_type {
    ERROR_TYPE_INVALID_REQUEST,
    ERROR_TYPE_AUTHENTICATION,
    ERROR_TYPE_SERVER,
    ERROR_TYPE_NOT_FOUND,
    ERROR_TYPE_PERMISSION,
    ERROR_TYPE_UNAVAILABLE,   // custom error
    ERROR_TYPE_NOT_SUPPORTED, // custom error
};

template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fallback null to default value
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value\n", key.c_str(), json(default_value).type_name());
            return default_value;
        }
    } else {
        return default_value;
    }
}
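
// Example (illustrative only, not part of the upstream header): reading an
// optional parameter from a parsed request body, falling back to the default
// when the key is missing, null, or has the wrong type:
//
//     json body  = json::parse(R"({"n_predict": 128, "seed": null})");
//     int n_pred = json_value(body, "n_predict", 64); // -> 128
//     int seed   = json_value(body, "seed", -1);      // -> -1 (null falls back to default)
//     int top_k  = json_value(body, "top_k", 40);     // -> 40 (missing key)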

//
// chat template utils
//

// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    std::vector<common_chat_msg> chat;

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];

        std::string role = json_value(curr_msg, "role", std::string(""));

        std::string content;
        if (curr_msg.contains("content")) {
            if (curr_msg["content"].is_string()) {
                content = curr_msg["content"].get<std::string>();
            } else if (curr_msg["content"].is_array()) {
                for (const auto & part : curr_msg["content"]) {
                    if (part.contains("text")) {
                        content += "\n" + part["text"].get<std::string>();
                    }
                }
            } else {
                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
            }
        } else {
            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
        }

        chat.push_back({role, content});
    }

    const auto formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
    LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());

    return formatted_chat;
}
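
// Example (illustrative, hypothetical request): the "messages" array accepted
// above, showing both supported "content" shapes (a plain string, or an array
// of parts carrying a "text" field):
//
//     [
//       {"role": "system", "content": "You are a helpful assistant."},
//       {"role": "user",   "content": [{"type": "text", "text": "Hello!"}]}
//     ]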

static std::string llama_get_chat_template(const struct llama_model * model) {
    std::string template_key = "tokenizer.chat_template";
    // call with NULL buffer to get the total size of the string
    int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0);
    if (res < 0) {
        return "";
    } else {
        std::vector<char> model_template(res, 0);
        llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
        return std::string(model_template.data(), model_template.size());
    }
}

//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}
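
// Example (illustrative): decoding the payload of a base64 "data:" URI, e.g.
// image content passed to multimodal endpoints. The input here is simply the
// string "hello" encoded, not a real image:
//
//     std::vector<uint8_t> bytes = base64_decode("aGVsbG8=");
//     // bytes == { 'h', 'e', 'l', 'l', 'o' }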

//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}

//
// other common utils
//

static size_t longest_common_prefix(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static size_t longest_common_prefix(const std::string & a, const std::string & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}
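
// Example (illustrative): detecting that generated text ends with the beginning
// of a stop string, so those characters can be held back until the next token
// decides whether the stop string actually completes:
//
//     size_t pos = find_partial_stop_string("</s>", "Hello world </");
//     // pos == 12: the text ends with "</", a prefix of the stop string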

static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : common_token_to_piece(ctx, token);

    // a single byte with the high bit set is part of an incomplete UTF-8
    // multibyte character (a piece longer than one byte is already a complete token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

struct completion_token_output {
    llama_token tok;
    std::string text_to_send;

    struct token_prob {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
};

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();

    for (const auto & prob : probs) {
        json probs_for_token = json::array();

        for (const auto & p : prob.probs) {
            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }

        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json {
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }

    return out;
}
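
// Example (illustrative, hypothetical values) of the JSON produced above, for
// one generated token with two candidate probabilities:
//
//     [
//       {
//         "content": " world",
//         "probs": [
//           {"tok_str": " world", "prob": 0.82},
//           {"tok_str": " there", "prob": 0.11}
//         ]
//       }
//     ]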

static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) {
    const std::string str =
        std::string(event) + ": " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n"; // note: the trailing blank line is required - the SSE spec terminates each event with a double newline

    LOG_DBG("data stream, to_send: %s", str.c_str());

    return sink.write(str.c_str(), str.size());
}
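
// Example (illustrative) of what a single call writes to the HTTP stream when
// event == "data" (JSON is compact because dump() is called with indent -1):
//
//     data: {"choices":[{"delta":{"content":"Hi"},"finish_reason":null,"index":0}]}
//     <blank line terminating the event>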

//
// OAI utils
//

static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json & body, /* openai api json semantics */
    const std::string & chat_template) {
    json llama_params;

    llama_params["__oaicompat"] = true;

    // Apply chat template to the list of messages
    llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
        } else if (response_type == "json_schema") {
            json json_schema = json_value(response_format, "json_schema", json::object());
            llama_params["json_schema"] = json_value(json_schema, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\", \"json_object\" or \"json_schema\", but got: " + response_type);
        }
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: The response format of this option is not yet OAI-compatible, but it seems no one really uses it; we may need to fix it in the future
    if (body.contains("logprobs")) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs")) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    // This allows the user to pass llama.cpp-specific params like "mirostat", "tfs_z", ... via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}
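
// Example (illustrative, hypothetical request): a "stop" string in the
// OpenAI-style body is normalized into the array form used internally, and the
// chat template output becomes "prompt"; remaining fields are copied through:
//
//     // incoming:  {"messages": [...], "stop": "</s>", "temperature": 0.7}
//     // resulting: {"__oaicompat": true, "prompt": "<formatted chat>", "stop": ["</s>"],
//     //             "messages": [...], "temperature": 0.7}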

static json format_final_response_oaicompat(const json & request, const json & result, const std::string & completion_id, bool streaming = false, bool verbose = false) {
    bool stopped_word        = result.count("stopped_word") != 0;
    bool stopped_eos         = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
    std::string content      = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"message", json{{"content", content},
                                                       {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res = json {
        {"choices", choices},
        {"created", t},
        {"model",
            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
        {"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }},
        {"id", completion_id}
    };

    // extra fields for debugging purposes
    if (verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    return res;
}
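
// Example (illustrative, hypothetical values) of the non-streaming response
// shape produced above:
//
//     {
//       "choices": [{"finish_reason": "stop", "index": 0,
//                    "message": {"content": "Hello!", "role": "assistant"}}],
//       "created": 1726000000,
//       "model": "gpt-3.5-turbo-0613",
//       "object": "chat.completion",
//       "usage": {"completion_tokens": 2, "prompt_tokens": 8, "total_tokens": 10},
//       "id": "chatcmpl-..."
//     }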

// return value is vector as there is one case where we might need to generate two responses
static std::vector<json> format_partial_response_oaicompat(const json & result, const std::string & completion_id) {
    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({result});
    }

    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word   = json_value(result, "stopped_word",  false);
    bool stopped_eos    = json_value(result, "stopped_eos",   false);
    bool stopped_limit  = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content", std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // We have to send this as two updates to conform to openai behavior
                json initial_ret = json{{"choices", json::array({json{
                                            {"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{
                                                {"role", "assistant"}
                                            }}}})},
                                        {"created", t},
                                        {"id", completion_id},
                                        {"model", modelname},
                                        {"object", "chat.completion.chunk"}};

                json second_ret = json{
                                        {"choices", json::array({json{{"finish_reason", nullptr},
                                                                      {"index", 0},
                                                                      {"delta", json{
                                                                          {"content", content}}}
                                                                      }})},
                                        {"created", t},
                                        {"id", completion_id},
                                        {"model", modelname},
                                        {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // Some idiosyncrasy in task processing logic makes several trailing calls
            // with empty content, we ignore these at the callee site.
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                    json{
                        {"content", content},
                    }},
            }});
        }
    }

    json ret = json {
        {"choices", choices},
        {"created", t},
        {"id",      completion_id},
        {"model",   modelname},
        {"object",  "chat.completion.chunk"}
    };

    if (!finish_reason.empty()) {
        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
        int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
        ret.push_back({"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }});
    }

    return std::vector<json>({ret});
}
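
// Example (illustrative): for the first token with non-empty content, two
// chunks are returned so the role and the content arrive as separate deltas,
// matching OpenAI streaming behavior:
//
//     {"choices":[{"finish_reason":null,"index":0,"delta":{"role":"assistant"}}], ...}
//     {"choices":[{"finish_reason":null,"index":0,"delta":{"content":"Hello"}}], ...}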

static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (const auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index",     i++},
            {"object",    "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json { // TODO: fill
            {"prompt_tokens", 0},
            {"total_tokens",  0}
        }},
        {"data", data}
    };

    return res;
}

static json format_response_rerank(const json & request, const json & ranks) {
    json data = json::array();
    int i = 0;
    for (const auto & rank : ranks) {
        data.push_back(json{
            {"index",           i++},
            {"relevance_score", json_value(rank, "score", 0.0)},
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json { // TODO: fill
            {"prompt_tokens", 0},
            {"total_tokens",  0}
        }},
        {"results", data}
    };

    return res;
}

static bool is_valid_utf8(const std::string & str) {
    const unsigned char * bytes = reinterpret_cast<const unsigned char *>(str.data());
    const unsigned char * end   = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // Invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}
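
// Example (illustrative): complete sequences pass, truncated multibyte
// sequences are rejected:
//
//     is_valid_utf8("hello");        // true  (ASCII only)
//     is_valid_utf8("\xE2\x82\xAC"); // true  ("€", complete 3-byte sequence)
//     is_valid_utf8("\xE2\x82");     // false (3-byte sequence cut short)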

static json format_tokenizer_response(const json & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
    }
    return json {
        {"code",    code},
        {"message", message},
        {"type",    type_str},
    };
}
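
// Example (illustrative) of the error body produced above, here for a malformed request:
//
//     format_error_response("Only one completion choice is allowed", ERROR_TYPE_INVALID_REQUEST);
//     // -> {"code": 400, "message": "Only one completion choice is allowed", "type": "invalid_request_error"}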