#pragma once

#include "common.h"
#include "log.h"
#include "llama.h"
#include "arg.h" // common_remote_get_content
#include "base64.hpp"
#include "mtmd.h"

// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
// disable Nagle's algorithm
#define CPPHTTPLIB_TCP_NODELAY true
#include "httplib.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include "chat.h"

#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <memory>
#include <cinttypes>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo"

using json = nlohmann::ordered_json;

#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)

#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)

#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)

using raw_buffer = std::vector<uint8_t>;

template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fallback null to default value
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value\n", key.c_str(), json(default_value).type_name());
            return default_value;
        }
    } else {
        return default_value;
    }
}
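
// A minimal usage sketch (hypothetical request body, not part of this file):
// given body = {"temperature": 0.7, "seed": null}, then
//   json_value(body, "temperature", 0.8f)             -> 0.7f  (key present, correct type)
//   json_value(body, "seed", 42)                      -> 42    (null falls back to the default)
//   json_value(body, "temperature", std::string("x")) -> "x"   (type mismatch: warning logged, default returned)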

const static std::string build_info("b" + std::to_string(LLAMA_BUILD_NUMBER) + "-" + LLAMA_COMMIT);

// thin wrapper around common_grammar_trigger with (de)serialization functions
struct server_grammar_trigger {
    common_grammar_trigger value;

    server_grammar_trigger() = default;
    server_grammar_trigger(const common_grammar_trigger & value) : value(value) {}
    server_grammar_trigger(const json & in) {
        value.type  = (common_grammar_trigger_type) in.at("type").get<int>();
        value.value = in.at("value").get<std::string>();
        if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
            value.token = (llama_token) in.at("token").get<int>();
        }
    }

    json to_json() const {
        json out {
            {"type", (int) value.type},
            {"value", value.value},
        };
        if (value.type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
            out["token"] = (int) value.token;
        }
        return out;
    }
};

//
// tokenizer and input processing utils
//

static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number_integer()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

// does the array contain BOTH numbers & strings?
static bool json_is_array_of_mixed_numbers_strings(const json & data) {
    bool seen_string = false;
    bool seen_number = false;
    if (data.is_array()) {
        for (const auto & e : data) {
            seen_string |= e.is_string();
            seen_number |= e.is_number_integer();
            if (seen_number && seen_string) {
                return true;
            }
        }
    }
    return false;
}

// get the values at the given slash-separated paths (e.g. "key1/key2")
static json json_get_nested_values(const std::vector<std::string> & paths, const json & js) {
    json result = json::object();

    for (const std::string & path : paths) {
        json current = js;
        const auto keys = string_split<std::string>(path, /*separator*/ '/');
        bool valid_path = true;
        for (const std::string & k : keys) {
            if (valid_path && current.is_object() && current.contains(k)) {
                current = current[k];
            } else {
                valid_path = false;
            }
        }
        if (valid_path) {
            result[path] = current;
        }
    }
    return result;
}

/**
 * this handles 2 cases:
 * - only string, example: "string"
 * - mixed string and tokens, example: [12, 34, "string", 56, 78]
 */
static llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    // If `add_special` is true, we only add BOS when json_prompt is a string,
    // or when the first element of the json_prompt array is a string.
    llama_tokens prompt_tokens;

    if (json_prompt.is_array()) {
        bool first = true;
        for (const auto & p : json_prompt) {
            if (p.is_string()) {
                auto s = p.template get<std::string>();

                llama_tokens ids; // named `ids` to avoid shadowing the loop variable `p`
                if (first) {
                    ids = common_tokenize(vocab, s, add_special, parse_special);
                    first = false;
                } else {
                    ids = common_tokenize(vocab, s, false, parse_special);
                }

                prompt_tokens.insert(prompt_tokens.end(), ids.begin(), ids.end());
            } else {
                if (first) {
                    first = false;
                }

                prompt_tokens.push_back(p.template get<llama_token>());
            }
        }
    } else {
        auto s = json_prompt.template get<std::string>();
        prompt_tokens = common_tokenize(vocab, s, add_special, parse_special);
    }

    return prompt_tokens;
}

/**
 * break the input "prompt" object into multiple prompts if needed, then tokenize them
 * this supports these cases:
 * - "prompt": "string"
 * - "prompt": [12, 34, 56]
 * - "prompt": [12, 34, "string", 56, 78]
 * and multiple prompts (multi-tasks):
 * - "prompt": ["string1", "string2"]
 * - "prompt": ["string1", [12, 34, 56]]
 * - "prompt": [[12, 34, 56], [78, 90, 12]]
 * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]]
 */
static std::vector<llama_tokens> tokenize_input_prompts(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    std::vector<llama_tokens> result;
    if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
        // string or mixed
        result.push_back(tokenize_mixed(vocab, json_prompt, add_special, parse_special));
    } else if (json_is_array_of_numbers(json_prompt)) {
        // array of tokens
        result.push_back(json_prompt.get<llama_tokens>());
    } else if (json_prompt.is_array()) {
        // array of prompts
        result.reserve(json_prompt.size());
        for (const auto & p : json_prompt) {
            if (p.is_string() || json_is_array_of_mixed_numbers_strings(p)) {
                result.push_back(tokenize_mixed(vocab, p, add_special, parse_special));
            } else if (json_is_array_of_numbers(p)) {
                // array of tokens
                result.push_back(p.get<llama_tokens>());
            } else {
                throw std::runtime_error("element of \"prompt\" must be a string, a list of tokens, or a list of mixed strings & tokens");
            }
        }
    } else {
        throw std::runtime_error("\"prompt\" must be a string, a list of tokens, a list of mixed strings & tokens, or a list of prompts");
    }
    if (result.empty()) {
        throw std::runtime_error("\"prompt\" must not be empty");
    }
    return result;
}
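
// Illustrative call (hypothetical prompt): passing {"prompt": ["hi", [1, 2, 3]]}
// produces two entries in the result, one token list per prompt, which the server
// then treats as separate tasks.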

// return the last index of character that can form a valid string
// if the last character is potentially cut in half, return the index before the cut
// if validate_utf8(text) == text.size(), then the whole text is valid utf8
static size_t validate_utf8(const std::string & text) {
    size_t len = text.size();
    if (len == 0) return 0;

    // Check the last few bytes to see if a multi-byte character is cut off
    for (size_t i = 1; i <= 4 && i <= len; ++i) {
        unsigned char c = text[len - i];
        // Check for start of a multi-byte sequence from the end
        if ((c & 0xE0) == 0xC0) {
            // 2-byte character start: 110xxxxx
            // Needs at least 2 bytes
            if (i < 2) return len - i;
        } else if ((c & 0xF0) == 0xE0) {
            // 3-byte character start: 1110xxxx
            // Needs at least 3 bytes
            if (i < 3) return len - i;
        } else if ((c & 0xF8) == 0xF0) {
            // 4-byte character start: 11110xxx
            // Needs at least 4 bytes
            if (i < 4) return len - i;
        }
    }

    // If no cut-off multi-byte character is found, return full length
    return len;
}
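
// Example ("€" is the 3-byte sequence E2 82 AC):
//   validate_utf8("ab\xE2\x82")     == 2  (the trailing bytes are a cut-off character)
//   validate_utf8("ab\xE2\x82\xAC") == 5  (the whole string is valid utf8)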

//
// template utils
//

// format rerank task: [BOS]query[EOS][SEP]doc[EOS]
static llama_tokens format_rerank(const struct llama_vocab * vocab, const llama_tokens & query, const llama_tokens & doc) {
    llama_tokens result;

    result.reserve(doc.size() + query.size() + 4);
    result.push_back(llama_vocab_bos(vocab));
    result.insert(result.end(), query.begin(), query.end());
    result.push_back(llama_vocab_eos(vocab));
    result.push_back(llama_vocab_sep(vocab));
    result.insert(result.end(), doc.begin(), doc.end());
    result.push_back(llama_vocab_eos(vocab));

    return result;
}

// format infill task
static llama_tokens format_infill(
        const llama_vocab * vocab,
        const json & input_prefix,
        const json & input_suffix,
        const json & input_extra,
        const int n_batch,
        const int n_predict,
        const int n_ctx,
        const bool spm_infill,
        const llama_tokens & tokens_prompt
    ) {
    // TODO: optimize this block by reducing memory allocations and movement
    // use FIM repo-level pattern:
    // ref: https://arxiv.org/pdf/2409.12186
    //
    // [FIM_REP]myproject
    // [FIM_SEP]filename0
    // extra chunk 0
    // [FIM_SEP]filename1
    // extra chunk 1
    // ...
    // [FIM_SEP]filename
    // [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt
    //
    llama_tokens extra_tokens;
    extra_tokens.reserve(n_ctx);

    auto tokens_prefix = tokenize_mixed(vocab, input_prefix, false, false);
    auto tokens_suffix = tokenize_mixed(vocab, input_suffix, false, false);

    if (llama_vocab_fim_rep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: make project name an input
        static const auto k_fim_repo = common_tokenize(vocab, "myproject\n", false, false);

        extra_tokens.push_back(llama_vocab_fim_rep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
    }
    for (const auto & chunk : input_extra) {
        // { "text": string, "filename": string }
        const std::string text     = json_value(chunk, "text",     std::string());
        const std::string filename = json_value(chunk, "filename", std::string("tmp"));

        if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
            const auto k_fim_file = common_tokenize(vocab, filename + "\n", false, false);

            extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
            extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
        } else {
            // chunk separator in binary form to avoid confusing the AI
            static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
            static const auto k_chunk_prefix_tokens = common_tokenize(vocab, k_chunk_prefix_str, false, false);

            extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
        }

        const auto chunk_tokens = common_tokenize(vocab, text, false, false);
        extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
    }

    if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: current filename
        static const auto k_fim_file = common_tokenize(vocab, "filename\n", false, false);

        extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
    }

    // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));

    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

    // fill the rest of the context with extra chunks
    const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());

    tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
    tokens_suffix.resize(n_suffix_take);

    tokens_prefix.insert(tokens_prefix.begin(), llama_vocab_fim_pre(vocab));
    tokens_prefix.insert(tokens_prefix.end(),   tokens_prompt.begin(), tokens_prompt.end());
    tokens_suffix.insert(tokens_suffix.begin(), llama_vocab_fim_suf(vocab));

    auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
    auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;

    if (llama_vocab_get_add_bos(vocab)) {
        embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab));
    }

    SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());

    // put the extra context before the FIM prefix
    embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());

    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    embd_inp.push_back(llama_vocab_fim_mid(vocab));

    return embd_inp;
}

//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline raw_buffer base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    raw_buffer ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}
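
// A minimal sketch of the expected behavior (values follow from the alphabet above):
//   base64_decode("aGk=") yields the bytes {'h', 'i'}; decoding stops at the '=' padding.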

//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}

static std::string gen_tool_call_id() {
    return random_string();
}

//
// other common utils
//

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}
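
// Example: with stop = "</s>" and text = "hello</", the text ends with "</",
// which is a prefix of the stop word, so this returns 5 (where the partial match begins).
// If no suffix of `text` is a prefix of `stop`, it returns std::string::npos.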

// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(ctx, token);

    // if the size is 1 and the high bit is 1, it's a partial character
    // (size > 1 means it's already a known token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) {
    const std::string str =
        std::string(event) + ": " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n"; // per the server-sent events spec, a message is terminated by a blank line (two line terminators in a row)

    LOG_DBG("data stream, to_send: %s", str.c_str());

    return sink.write(str.c_str(), str.size());
}
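
// For example, server_sent_event(sink, "data", json{{"ok", true}}) writes the
// following bytes on the wire (hypothetical payload, shown for illustration):
//   data: {"ok":true}\n\n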

//
// OAI utils
//

static json oaicompat_completion_params_parse(const json & body) {
    json llama_params;

    if (!body.contains("prompt")) {
        throw std::runtime_error("\"prompt\" is required");
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "echo" field
    if (json_value(body, "echo", false)) {
        throw std::runtime_error("Only no echo is supported");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "best_of", "suffix" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

static json oaicompat_completion_params_parse(
    const json & body, /* openai api json semantics */
    bool use_jinja,
    common_reasoning_format reasoning_format,
    const struct common_chat_templates * tmpls,
    bool allow_non_text,
    std::vector<raw_buffer> & out_files)
{
    json llama_params;

    auto tools  = json_value(body, "tools", json());
    auto stream = json_value(body, "stream", false);

    if (tools.is_array() && !tools.empty()) {
        if (stream) {
            throw std::runtime_error("Cannot use tools with stream");
        }
        if (!use_jinja) {
            throw std::runtime_error("tools param requires --jinja flag");
        }
    }
    if (!use_jinja) {
        if (body.contains("tool_choice") && !body.at("tool_choice").is_null()) {
            throw std::runtime_error("Unsupported param: tool_choice");
        }
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    auto json_schema = json_value(body, "json_schema", json());
    auto grammar     = json_value(body, "grammar", std::string());
    if (!json_schema.is_null() && !grammar.empty()) {
        throw std::runtime_error("Cannot use both json_schema and grammar");
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            json_schema = json_value(response_format, "schema", json::object());
        } else if (response_type == "json_schema") {
            auto schema_wrapper = json_value(response_format, "json_schema", json::object());
            json_schema = json_value(schema_wrapper, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\", \"json_object\" or \"json_schema\", but got: " + response_type);
        }
    }

    // get input files
    if (!body.contains("messages")) {
        throw std::runtime_error("'messages' is required");
    }
    json messages = body.at("messages");
    if (!messages.is_array()) {
        throw std::runtime_error("Expected 'messages' to be an array");
    }
    for (auto & msg : messages) {
        json & content = msg.at("content");
        if (content.is_string() || content.is_null()) {
            continue;
        }
        if (!content.is_array()) {
            throw std::runtime_error("Expected 'content' to be a string or an array");
        }
        for (auto & p : content) {
            std::string type = json_value(p, "type", std::string());
            json image_url   = json_value(p, "image_url", json::object());
            if (type == "image_url") {
                if (!allow_non_text) {
                    throw std::runtime_error("image input is not supported by this server");
                }
                std::string url = json_value(image_url, "url", std::string());
                if (string_starts_with(url, "http")) {
                    // download remote image
                    // TODO @ngxson : maybe make these params configurable
                    common_remote_params params;
                    params.headers.push_back("User-Agent: llama.cpp/" + build_info);
                    params.max_size = 1024 * 1024 * 10; // 10MB
                    params.timeout  = 10; // seconds
                    SRV_INF("downloading image from '%s'\n", url.c_str());
                    auto res = common_remote_get_content(url, params);
                    if (200 <= res.first && res.first < 300) {
                        SRV_INF("downloaded %zu bytes\n", res.second.size());
                        raw_buffer data;
                        data.insert(data.end(), res.second.begin(), res.second.end());
                        out_files.push_back(data);
                    } else {
                        throw std::runtime_error("Failed to download image");
                    }
                } else {
                    // try to decode base64 image
                    std::vector<std::string> parts = string_split<std::string>(url, /*separator*/ ',');
                    if (parts.size() != 2) {
                        throw std::runtime_error("Invalid image_url.url value");
                    } else if (!string_starts_with(parts[0], "data:image/")) {
                        throw std::runtime_error("Invalid image_url.url format: " + parts[0]);
                    } else if (!string_ends_with(parts[0], "base64")) {
                        throw std::runtime_error("image_url.url must be base64 encoded");
                    } else {
                        auto base64_data  = parts[1];
                        auto decoded_data = base64_decode(base64_data);
                        out_files.push_back(decoded_data);
                    }
                }

                // replace this chunk with a marker
                p["type"] = "text";
                p["text"] = MTMD_DEFAULT_IMAGE_MARKER;
                p.erase("image_url");
            }
        }
    }

    common_chat_templates_inputs inputs;
    inputs.messages              = common_chat_msgs_parse_oaicompat(messages);
    inputs.tools                 = common_chat_tools_parse_oaicompat(tools);
    inputs.tool_choice           = common_chat_tool_choice_parse_oaicompat(json_value(body, "tool_choice", std::string("auto")));
    inputs.json_schema           = json_schema.is_null() ? "" : json_schema.dump();
    inputs.grammar               = grammar;
    inputs.use_jinja             = use_jinja;
    inputs.parallel_tool_calls   = json_value(body, "parallel_tool_calls", false);
    inputs.extract_reasoning     = reasoning_format != COMMON_REASONING_FORMAT_NONE;
    inputs.add_generation_prompt = json_value(body, "add_generation_prompt", true);
    if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && body.contains("grammar")) {
        throw std::runtime_error("Cannot use custom grammar constraints with tools.");
    }

    // if the assistant message appears at the end of the list, we do not add the end-of-turn token
    // for ex. this can be useful to modify the reasoning process in reasoning models
    bool prefill_assistant_message = !inputs.messages.empty() && inputs.messages.back().role == "assistant";
    common_chat_msg last_message;
    if (prefill_assistant_message) {
        last_message = inputs.messages.back();
        inputs.messages.pop_back();

        /* sanity check, max one assistant message at the end of the list */
        if (!inputs.messages.empty() && inputs.messages.back().role == "assistant"){
            throw std::runtime_error("Cannot have 2 or more assistant messages at the end of the list.");
        }

        inputs.extract_reasoning     = false;
        inputs.add_generation_prompt = true;
    }

    // Apply chat template to the list of messages
    auto chat_params = common_chat_templates_apply(tmpls, inputs);

    /* Append assistant prefilled message */
    if (prefill_assistant_message) {
        chat_params.prompt += last_message.content;
    }

    llama_params["chat_format"] = static_cast<int>(chat_params.format);
    llama_params["prompt"]      = chat_params.prompt;
    if (!chat_params.grammar.empty()) {
        llama_params["grammar"] = chat_params.grammar;
    }
    llama_params["grammar_lazy"] = chat_params.grammar_lazy;
    auto grammar_triggers = json::array();
    for (const auto & trigger : chat_params.grammar_triggers) {
        server_grammar_trigger ct(trigger);
        grammar_triggers.push_back(ct.to_json());
    }
    llama_params["grammar_triggers"] = grammar_triggers;
    llama_params["preserved_tokens"] = chat_params.preserved_tokens;
    for (const auto & stop : chat_params.additional_stops) {
        llama_params["stop"].push_back(stop);
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: The response format of this option is not yet OAI-compatible, but it seems like no one is really using it; we may need to fix it in the future
    if (json_value(body, "logprobs", false)) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Copy remaining properties to llama_params
    // This allows the user to pass llama.cpp-specific params like "mirostat" via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}
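
// An illustrative mapping (hypothetical request, not an exhaustive list): a body such as
//   {"messages": [{"role": "user", "content": "hi"}], "max_tokens": 16}
// comes out with "prompt" set to the chat-template rendering of the messages,
// "chat_format"/"grammar*" filled from the template output, and remaining keys
// such as "max_tokens" copied through for the slot to interpret.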

static json format_embeddings_response_oaicompat(const json & request, const json & embeddings, bool use_base64 = false) {
    json data = json::array();
    int32_t n_tokens = 0;
    int i = 0;
    for (const auto & elem : embeddings) {
        json embedding_obj;

        if (use_base64) {
            const auto & vec = json_value(elem, "embedding", json::array()).get<std::vector<float>>();
            const char * data_ptr = reinterpret_cast<const char *>(vec.data());
            size_t data_size = vec.size() * sizeof(float);
            embedding_obj = {
                {"embedding", base64::encode(data_ptr, data_size)},
                {"index", i++},
                {"object", "embedding"},
                {"encoding_format", "base64"}
            };
        } else {
            embedding_obj = {
                {"embedding", json_value(elem, "embedding", json::array())},
                {"index", i++},
                {"object", "embedding"}
            };
        }
        data.push_back(embedding_obj);

        n_tokens += json_value(elem, "tokens_evaluated", 0);
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", n_tokens},
            {"total_tokens", n_tokens}
        }},
        {"data", data}
    };

    return res;
}
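
// The resulting shape mirrors OAI's /v1/embeddings response, e.g. (illustrative):
//   {"model": "...", "object": "list",
//    "usage": {"prompt_tokens": N, "total_tokens": N},
//    "data": [{"embedding": [...], "index": 0, "object": "embedding"}, ...]}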

static json format_response_rerank(
        const json & request,
        const json & ranks,
        bool is_tei_format,
        std::vector<std::string> & texts) {
    json res;
    if (is_tei_format) {
        // TEI response format
        res = json::array();
        bool return_text = json_value(request, "return_text", false);
        for (const auto & rank : ranks) {
            int index = json_value(rank, "index", 0);
            json elem = json{
                {"index", index},
                {"score", json_value(rank, "score", 0.0)},
            };
            if (return_text) {
                elem["text"] = std::move(texts[index]);
            }
            res.push_back(elem);
        }
    } else {
        // Jina response format
        json results = json::array();
        int32_t n_tokens = 0;
        for (const auto & rank : ranks) {
            results.push_back(json{
                {"index", json_value(rank, "index", 0)},
                {"relevance_score", json_value(rank, "score", 0.0)},
            });

            n_tokens += json_value(rank, "tokens_evaluated", 0);
        }

        res = json{
            {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
            {"object", "list"},
            {"usage", json{
                {"prompt_tokens", n_tokens},
                {"total_tokens", n_tokens}
            }},
            {"results", results}
        };
    }

    return res;
}

static bool is_valid_utf8(const std::string & str) {
    const unsigned char * bytes = reinterpret_cast<const unsigned char *>(str.data());
    const unsigned char * end = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // Invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}

static json format_tokenizer_response(const json & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_logit_bias(const std::vector<llama_logit_bias> & logit_bias) {
    json data = json::array();
    for (const auto & lb : logit_bias) {
        data.push_back(json{
            {"bias", lb.bias},
            {"token", lb.token},
        });
    }
    return data;
}

static std::string safe_json_to_str(const json & data) {
    return data.dump(-1, ' ', false, json::error_handler_t::replace);
}
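
// "safe" here refers to the error handler: invalid UTF-8 in strings is replaced
// with U+FFFD instead of throwing, so serializing raw model output cannot fail.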

static std::vector<llama_token_data> get_token_probabilities(llama_context * ctx, int idx) {
    std::vector<llama_token_data> cur;
    const auto * logits = llama_get_logits_ith(ctx, idx);

    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    const int n_vocab = llama_vocab_n_tokens(vocab);

    cur.resize(n_vocab);
    for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
        cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
    }

    // sort tokens by logits
    std::sort(cur.begin(), cur.end(), [](const llama_token_data & a, const llama_token_data & b) {
        return a.logit > b.logit;
    });

    // apply softmax
    float max_l   = cur[0].logit;
    float cum_sum = 0.0f;
    for (size_t i = 0; i < cur.size(); ++i) {
        float p = expf(cur[i].logit - max_l);
        cur[i].p = p;
        cum_sum += p;
    }
    for (size_t i = 0; i < cur.size(); ++i) {
        cur[i].p /= cum_sum;
    }

    return cur;
}
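
// Note: the probabilities are a plain softmax over the full vocabulary at position
// `idx`, with entries sorted by descending logit, so cur[0] is the most likely token.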

static bool are_lora_equal(
        const std::vector<common_adapter_lora_info> & l1,
        const std::vector<common_adapter_lora_info> & l2) {
    if (l1.size() != l2.size()) {
        return false;
    }
    for (size_t i = 0; i < l1.size(); ++i) {
        // we don't check lora.path to reduce the time complexity
        if (l1[i].scale != l2[i].scale || l1[i].ptr != l2[i].ptr) {
            return false;
        }
    }
    return true;
}

// parse lora config from a JSON request, returns a copy of lora_base with updated scales
static std::vector<common_adapter_lora_info> parse_lora_request(
        const std::vector<common_adapter_lora_info> & lora_base,
        const json & data) {
    std::vector<common_adapter_lora_info> lora(lora_base);
    int max_idx = lora.size();

    // clear existing values
    for (auto & entry : lora) {
        entry.scale = 0.0f;
    }

    // set new values
    for (const auto & entry : data) {
        int id      = json_value(entry, "id", -1);
        float scale = json_value(entry, "scale", 0.0f);
        if (0 <= id && id < max_idx) {
            lora[id].scale = scale;
        } else {
            throw std::runtime_error("invalid adapter id");
        }
    }

    return lora;
}
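
// Example request body (hypothetical): [{"id": 0, "scale": 0.5}, {"id": 1, "scale": 1.0}]
// Any adapter not mentioned in the request keeps scale 0.0, i.e. it is disabled.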

//
// utils for interacting with libmtmd
// (may need to refactor in near future)
//

/**
 * server_tokens is a helper to manage the input tokens and images for the server.
 * it is made this way to simplify the logic of KV cache management.
 */
struct server_tokens {
    bool has_mtmd = false;

private: // disallow accessing these members directly; doing so risks them getting out of sync
    // map a **start** position in tokens to the image chunk
    std::unordered_map<llama_pos, mtmd::input_chunk_ptr> map_pos_to_image;

    // list of tokens
    // it can include LLAMA_TOKEN_NULL, which is used to indicate a token that is not a text token
    // a mtmd_input_chunk can occupy multiple tokens, one llama_token per **position**
    // important: for models using mrope, an image can contain multiple tokens but will use only one **position**
    llama_tokens tokens;

    // for ex. with input of 5 text tokens and 2 images:
    //    [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1]
    // pos 0   1   2   3   4   5      6      7      8      9
    // map_pos_to_image will contain: {5, img0}, {8, img1}

public:
    server_tokens() = default;
    ~server_tokens() = default;

    // Prevent copying
    server_tokens(const server_tokens &) = delete;
    server_tokens & operator=(const server_tokens &) = delete;

    // Allow moving (usually implicitly generated if members are movable)
    server_tokens(server_tokens &&) = default;
    server_tokens & operator=(server_tokens &&) = default;

    // Allow accessing elements using [] operator
    llama_token operator[](size_t index) { return tokens[index]; }
    const llama_token & operator[](size_t index) const { return tokens[index]; }

    server_tokens(mtmd::input_chunks & mtmd_chunks, bool has_mtmd) : has_mtmd(has_mtmd) {
        for (size_t i = 0; i < mtmd_chunks.size(); ++i) {
            push_back(mtmd_chunks[i]);
        }
    }

    server_tokens(llama_tokens & tokens, bool has_mtmd) : has_mtmd(has_mtmd), tokens(tokens) {}

    // for debugging
    std::string str() const {
        std::ostringstream oss;
        oss << "tokens: ";
        for (const auto & t : tokens) {
            if (t == LLAMA_TOKEN_NULL) {
                oss << "<embd> ";
            } else {
                oss << t << " ";
            }
        }
        oss << "\n";
        oss << "image pos: ";
        for (const auto & it : map_pos_to_image) {
            oss << it.first << ", ";
        }
        return oss.str();
    }

    const mtmd::input_chunk_ptr & find_chunk(llama_pos pos) const {
        auto it = map_pos_to_image.find(pos);
        if (it != map_pos_to_image.end()) {
            return it->second;
        } else {
            throw std::runtime_error("Chunk not found");
        }
    }

    void push_back(llama_token tok) {
        if (tok == LLAMA_TOKEN_NULL) {
            throw std::runtime_error("Invalid token");
        }
        tokens.emplace_back(tok);
    }

    // will create a copy of the chunk if it contains non-text data
    void push_back(const mtmd_input_chunk * chunk) {
        auto type = mtmd_input_chunk_get_type(chunk);
        if (type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
            GGML_ASSERT(has_mtmd);
            auto img_tokens = mtmd_input_chunk_get_tokens_image(chunk);
            const int n_pos = mtmd_image_tokens_get_n_pos(img_tokens);
            llama_pos start_pos = tokens.size();
            for (int i = 0; i < n_pos; ++i) {
                tokens.emplace_back(LLAMA_TOKEN_NULL);
            }
            mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk));
            map_pos_to_image[start_pos] = std::move(new_chunk);
        } else if (type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
            size_t n_tokens;
            auto text_tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
            for (size_t i = 0; i < n_tokens; ++i) {
                push_back(text_tokens[i]);
            }
        } else {
            GGML_ABORT("Invalid chunk type");
        }
    }

    // for compatibility with context shift and prompt truncation
    void insert(const llama_tokens & inp_tokens) {
        GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
        tokens.insert(tokens.end(), inp_tokens.begin(), inp_tokens.end());
    }

    // for compatibility with speculative decoding, ctx shift, slot save/load
    const llama_tokens & get_text_tokens() const {
        GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
        return tokens;
    }

    // for compatibility with speculative decoding
    void set_token(llama_pos pos, llama_token id) {
        GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
        tokens[pos] = id;
    }

    size_t size() const {
        return tokens.size();
    }

    bool empty() const {
        return tokens.empty();
    }

    void clear() {
        tokens.clear();
    }

    void resize(size_t n) {
        GGML_ASSERT(n <= tokens.size());
        if (has_mtmd) {
            // we throw an error if we try to remove a token in the middle of an image
            // for ex. with input of 5 text tokens and 2 images:
            //    [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1]
            // n   1   2   3   4   5   6      7      8      9      10
            // allowed to resize       ^                    ^
            // disallowed to resize           ^      ^             ^
            if (n > 0) {
                llama_token last_token = tokens[n - 1];
                // make sure we never remove tokens in the middle of an image
                if (last_token == LLAMA_TOKEN_NULL) {
                    find_chunk(n - 1); // will throw an error if the token is not begin-of-chunk
                }
            }
            // remove all image chunks that are not used anymore
            for (auto it = map_pos_to_image.begin(); it != map_pos_to_image.end(); ) {
                llama_pos pos = it->first;
                if (pos >= (llama_pos) n) {
                    it = map_pos_to_image.erase(it);
                } else {
                    ++it;
                }
            }
        }
        tokens.resize(n);
    }

    std::string detokenize(const llama_context * ctx, bool special) const {
        llama_tokens text_tokens;
        text_tokens.reserve(tokens.size());
        for (const auto & t : tokens) {
            if (t != LLAMA_TOKEN_NULL) {
                text_tokens.push_back(t);
            }
        }
        return common_detokenize(ctx, text_tokens, special);
    }

    size_t get_common_prefix(const server_tokens & b) const {
        size_t max_idx = std::min(tokens.size(), b.tokens.size());
        for (size_t i = 0; i < max_idx; ++i) {
            auto & ai =   tokens[i];
            auto & bi = b.tokens[i];

            if (ai == LLAMA_TOKEN_NULL && bi == LLAMA_TOKEN_NULL) {
                GGML_ASSERT(has_mtmd);
                const auto & a_chunk =   find_chunk(i);
                const auto & b_chunk = b.find_chunk(i);
                GGML_ASSERT(a_chunk && b_chunk);
                const auto * a_img = mtmd_input_chunk_get_tokens_image(a_chunk.get());
                const auto * b_img = mtmd_input_chunk_get_tokens_image(b_chunk.get());
                std::string ai_id = mtmd_image_tokens_get_id(a_img);
                std::string bi_id = mtmd_image_tokens_get_id(b_img);
                size_t a_pos = mtmd_image_tokens_get_n_pos(a_img);
                size_t b_pos = mtmd_image_tokens_get_n_pos(b_img);
                if (ai_id == bi_id && a_pos == b_pos) {
                    GGML_ASSERT(a_pos > 0 && "Invalid image token"); // should never happen
                    i += a_pos - 1; // will be +1 by the for loop
                    continue;
                } else {
                    return i;
                }
            } else if (ai == bi) {
                continue;
            } else {
                return i;
            }
        }
        return max_idx; // all tokens are equal
    }
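
    // Example: comparing [1] [2] [img0] [img0] against [1] [2] [img0] [img0] [3]
    // returns 4: the text tokens match and the image chunks have the same id and
    // n_pos, so the whole image counts as part of the common prefix.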

    // make sure all text tokens are within the vocab range
    bool validate(const struct llama_context * ctx) const {
        const llama_model * model = llama_get_model(ctx);
        const llama_vocab * vocab = llama_model_get_vocab(model);
        const int32_t n_vocab = llama_vocab_n_tokens(vocab);

        for (size_t i = 0; i < tokens.size(); ++i) {
            auto & t = tokens[i];
            if (t == LLAMA_TOKEN_NULL) {
                try {
                    const auto & chunk = find_chunk(i);
                    const auto * img_tokens = mtmd_input_chunk_get_tokens_image(chunk.get());
                    size_t n_pos = mtmd_image_tokens_get_n_pos(img_tokens);
                    i += n_pos - 1; // will be +1 by the for loop
                } catch (const std::exception & e) {
                    return false;
                }
            } else if (t < 0 || t >= n_vocab) {
                return false;
            }
        }
        return true;
    }

    // encode and decode the image chunk
    int32_t process_chunk(
            llama_context * ctx,
            mtmd_context * mctx,
            llama_pos n_past,
            int32_t seq_id,
            llama_pos & n_pos_out) {
        auto it = map_pos_to_image.find(n_past);
        if (it == map_pos_to_image.end()) {
            throw std::runtime_error("Chunk not found");
        }
        SRV_INF("%s\n", "processing image...");
        int32_t n_batch = llama_n_batch(ctx);
        int64_t t0 = ggml_time_ms();
        llama_pos new_n_past = n_past;
        int32_t result = mtmd_helper_eval_chunk_single(mctx, ctx,
            it->second.get(), // chunk
            n_past,
            seq_id,
            n_batch,
            true, // logits last
            &new_n_past);
        SRV_INF("image processed in %" PRId64 " ms\n", ggml_time_ms() - t0);
        if (result != 0) {
            LOG_ERR("mtmd_helper_eval failed with status %d\n", result);
            n_pos_out = n_past;
            return result;
        }
        n_pos_out = new_n_past;
        return 0;
    }
};

// Computes FNV-1a hash of the data
static std::string fnv_hash(const uint8_t * data, size_t len) {
    const uint64_t fnv_prime = 0x100000001b3ULL;
    uint64_t hash = 0xcbf29ce484222325ULL;

    for (size_t i = 0; i < len; ++i) {
        hash ^= data[i];
        hash *= fnv_prime;
    }
    return std::to_string(hash);
}
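
// This is standard 64-bit FNV-1a (basis 0xcbf29ce484222325, prime 0x100000001b3);
// e.g. hashing zero bytes returns the offset basis itself, i.e. "14695981039346656037".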