// server-task.cpp

#include "server-common.h"
#include "server-task.h"

#include "common.h"
#include "llama.h"
#include "chat.h"
#include "sampling.h"
#include "json-schema-to-grammar.h"

using json = nlohmann::ordered_json;

//
// task_params
//

json task_params::format_logit_bias(const std::vector<llama_logit_bias> & logit_bias) const {
    json data = json::array();
    for (const auto & lb : logit_bias) {
        data.push_back(json{
            {"bias", lb.bias},
            {"token", lb.token},
        });
    }
    return data;
}

json task_params::to_json(bool only_metrics) const {
    std::vector<std::string> samplers;
    samplers.reserve(sampling.samplers.size());
    for (const auto & sampler : sampling.samplers) {
        samplers.emplace_back(common_sampler_type_to_str(sampler));
    }

    json lora = json::array();
    for (auto & it : this->lora) {
        lora.push_back({{"id", it.first}, {"scale", it.second}});
    }

    if (only_metrics) {
        return json {
            {"seed", sampling.seed},
            {"temperature", sampling.temp},
            {"dynatemp_range", sampling.dynatemp_range},
            {"dynatemp_exponent", sampling.dynatemp_exponent},
            {"top_k", sampling.top_k},
            {"top_p", sampling.top_p},
            {"min_p", sampling.min_p},
            {"top_n_sigma", sampling.top_n_sigma},
            {"xtc_probability", sampling.xtc_probability},
            {"xtc_threshold", sampling.xtc_threshold},
            {"typical_p", sampling.typ_p},
            {"repeat_last_n", sampling.penalty_last_n},
            {"repeat_penalty", sampling.penalty_repeat},
            {"presence_penalty", sampling.penalty_present},
            {"frequency_penalty", sampling.penalty_freq},
            {"dry_multiplier", sampling.dry_multiplier},
            {"dry_base", sampling.dry_base},
            {"dry_allowed_length", sampling.dry_allowed_length},
            {"dry_penalty_last_n", sampling.dry_penalty_last_n},
            {"mirostat", sampling.mirostat},
            {"mirostat_tau", sampling.mirostat_tau},
            {"mirostat_eta", sampling.mirostat_eta},
            {"max_tokens", n_predict},
            {"n_predict", n_predict}, // TODO: deduplicate?
            {"n_keep", n_keep},
            {"n_discard", n_discard},
            {"ignore_eos", sampling.ignore_eos},
            {"stream", stream},
            {"n_probs", sampling.n_probs},
            {"min_keep", sampling.min_keep},
            {"chat_format", common_chat_format_name(oaicompat_chat_syntax.format)},
            {"reasoning_format", common_reasoning_format_name(oaicompat_chat_syntax.reasoning_format)},
            {"reasoning_in_content", oaicompat_chat_syntax.reasoning_in_content},
            {"thinking_forced_open", oaicompat_chat_syntax.thinking_forced_open},
            {"samplers", samplers},
            {"speculative.n_max", speculative.n_max},
            {"speculative.n_min", speculative.n_min},
            {"speculative.p_min", speculative.p_min},
            {"timings_per_token", timings_per_token},
            {"post_sampling_probs", post_sampling_probs},
            {"backend_sampling", sampling.backend_sampling},
            {"lora", lora},
        };
    }

    auto grammar_triggers = json::array();
    for (const auto & trigger : sampling.grammar_triggers) {
        server_grammar_trigger ct(trigger);
        grammar_triggers.push_back(ct.to_json());
    }

    return json {
        {"seed", sampling.seed},
        {"temperature", sampling.temp},
        {"dynatemp_range", sampling.dynatemp_range},
        {"dynatemp_exponent", sampling.dynatemp_exponent},
        {"top_k", sampling.top_k},
        {"top_p", sampling.top_p},
        {"min_p", sampling.min_p},
        {"top_n_sigma", sampling.top_n_sigma},
        {"xtc_probability", sampling.xtc_probability},
        {"xtc_threshold", sampling.xtc_threshold},
        {"typical_p", sampling.typ_p},
        {"repeat_last_n", sampling.penalty_last_n},
        {"repeat_penalty", sampling.penalty_repeat},
        {"presence_penalty", sampling.penalty_present},
        {"frequency_penalty", sampling.penalty_freq},
        {"dry_multiplier", sampling.dry_multiplier},
        {"dry_base", sampling.dry_base},
        {"dry_allowed_length", sampling.dry_allowed_length},
        {"dry_penalty_last_n", sampling.dry_penalty_last_n},
        {"dry_sequence_breakers", sampling.dry_sequence_breakers},
        {"mirostat", sampling.mirostat},
        {"mirostat_tau", sampling.mirostat_tau},
        {"mirostat_eta", sampling.mirostat_eta},
        {"stop", antiprompt},
        {"max_tokens", n_predict},
        {"n_predict", n_predict}, // TODO: deduplicate?
        {"n_keep", n_keep},
        {"n_discard", n_discard},
        {"ignore_eos", sampling.ignore_eos},
        {"stream", stream},
        {"logit_bias", format_logit_bias(sampling.logit_bias)},
        {"n_probs", sampling.n_probs},
        {"min_keep", sampling.min_keep},
        {"grammar", sampling.grammar},
        {"grammar_lazy", sampling.grammar_lazy},
        {"grammar_triggers", grammar_triggers},
        {"preserved_tokens", sampling.preserved_tokens},
        {"chat_format", common_chat_format_name(oaicompat_chat_syntax.format)},
        {"reasoning_format", common_reasoning_format_name(oaicompat_chat_syntax.reasoning_format)},
        {"reasoning_in_content", oaicompat_chat_syntax.reasoning_in_content},
        {"thinking_forced_open", oaicompat_chat_syntax.thinking_forced_open},
        {"samplers", samplers},
        {"speculative.n_max", speculative.n_max},
        {"speculative.n_min", speculative.n_min},
        {"speculative.p_min", speculative.p_min},
        {"timings_per_token", timings_per_token},
        {"post_sampling_probs", post_sampling_probs},
        {"backend_sampling", sampling.backend_sampling},
        {"lora", lora},
    };
}

//
// server_task
//

task_params server_task::params_from_json_cmpl(
        const llama_vocab * vocab,
        const common_params & params_base,
        const int n_ctx_slot,
        const json & data) {
    task_params params;
    // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them)
    task_params defaults;
    defaults.sampling = params_base.sampling;
    defaults.speculative = params_base.speculative;
    defaults.n_keep = params_base.n_keep;
    defaults.n_predict = params_base.n_predict;
    defaults.n_cache_reuse = params_base.n_cache_reuse;
    defaults.antiprompt = params_base.antiprompt;

    // enabling this will output extra debug information in the HTTP responses from the server
    params.verbose = params_base.verbosity > 9;
    params.timings_per_token = json_value(data, "timings_per_token", false);

    params.stream = json_value(data, "stream", false);
    auto stream_opt = json_value(data, "stream_options", json::object());
    params.include_usage = json_value(stream_opt, "include_usage", false);
    params.cache_prompt = json_value(data, "cache_prompt", true);
    params.return_tokens = json_value(data, "return_tokens", false);
    params.return_progress = json_value(data, "return_progress", false);
    params.n_predict = json_value(data, "n_predict", json_value(data, "max_tokens", defaults.n_predict));
    params.n_indent = json_value(data, "n_indent", defaults.n_indent);
    params.n_keep = json_value(data, "n_keep", defaults.n_keep);
    params.n_discard = json_value(data, "n_discard", defaults.n_discard);
    params.n_cmpl = json_value(data, "n_cmpl", json_value(data, "n", 1));
    params.n_cache_reuse = json_value(data, "n_cache_reuse", defaults.n_cache_reuse);
    //params.t_max_prompt_ms = json_value(data, "t_max_prompt_ms", defaults.t_max_prompt_ms); // TODO: implement
    params.t_max_predict_ms = json_value(data, "t_max_predict_ms", defaults.t_max_predict_ms);
    params.response_fields = json_value(data, "response_fields", std::vector<std::string>());

    params.sampling.top_k = json_value(data, "top_k", defaults.sampling.top_k);
    params.sampling.top_p = json_value(data, "top_p", defaults.sampling.top_p);
    params.sampling.min_p = json_value(data, "min_p", defaults.sampling.min_p);
    params.sampling.top_n_sigma = json_value(data, "top_n_sigma", defaults.sampling.top_n_sigma);
    params.sampling.xtc_probability = json_value(data, "xtc_probability", defaults.sampling.xtc_probability);
    params.sampling.xtc_threshold = json_value(data, "xtc_threshold", defaults.sampling.xtc_threshold);
    params.sampling.typ_p = json_value(data, "typical_p", defaults.sampling.typ_p);
    params.sampling.temp = json_value(data, "temperature", defaults.sampling.temp);
    params.sampling.dynatemp_range = json_value(data, "dynatemp_range", defaults.sampling.dynatemp_range);
    params.sampling.dynatemp_exponent = json_value(data, "dynatemp_exponent", defaults.sampling.dynatemp_exponent);
    params.sampling.penalty_last_n = json_value(data, "repeat_last_n", defaults.sampling.penalty_last_n);
    params.sampling.penalty_repeat = json_value(data, "repeat_penalty", defaults.sampling.penalty_repeat);
    params.sampling.penalty_freq = json_value(data, "frequency_penalty", defaults.sampling.penalty_freq);
    params.sampling.penalty_present = json_value(data, "presence_penalty", defaults.sampling.penalty_present);
    params.sampling.dry_multiplier = json_value(data, "dry_multiplier", defaults.sampling.dry_multiplier);
    params.sampling.dry_base = json_value(data, "dry_base", defaults.sampling.dry_base);
    params.sampling.dry_allowed_length = json_value(data, "dry_allowed_length", defaults.sampling.dry_allowed_length);
    params.sampling.dry_penalty_last_n = json_value(data, "dry_penalty_last_n", defaults.sampling.dry_penalty_last_n);
    params.sampling.mirostat = json_value(data, "mirostat", defaults.sampling.mirostat);
    params.sampling.mirostat_tau = json_value(data, "mirostat_tau", defaults.sampling.mirostat_tau);
    params.sampling.mirostat_eta = json_value(data, "mirostat_eta", defaults.sampling.mirostat_eta);
    params.sampling.seed = json_value(data, "seed", defaults.sampling.seed);
    params.sampling.n_probs = json_value(data, "n_probs", defaults.sampling.n_probs);
    params.sampling.min_keep = json_value(data, "min_keep", defaults.sampling.min_keep);
    params.sampling.backend_sampling = json_value(data, "backend_sampling", defaults.sampling.backend_sampling);
    params.post_sampling_probs = json_value(data, "post_sampling_probs", defaults.post_sampling_probs);
    params.speculative.n_min = json_value(data, "speculative.n_min", defaults.speculative.n_min);
    params.speculative.n_max = json_value(data, "speculative.n_max", defaults.speculative.n_max);
    params.speculative.p_min = json_value(data, "speculative.p_min", defaults.speculative.p_min);
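
    // clamp the speculative draft range so that 0 <= n_min <= n_max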
    params.speculative.n_min = std::min(params.speculative.n_max, params.speculative.n_min);
    params.speculative.n_min = std::max(params.speculative.n_min, 0);
    params.speculative.n_max = std::max(params.speculative.n_max, 0);

    // Use OpenAI API logprobs only if n_probs wasn't provided
    if (data.contains("logprobs") && params.sampling.n_probs == defaults.sampling.n_probs) {
        params.sampling.n_probs = json_value(data, "logprobs", defaults.sampling.n_probs);
    }

    if (data.contains("lora")) {
        if (data.at("lora").is_array()) {
            params.lora = parse_lora_request(data.at("lora"));
        } else {
            throw std::runtime_error("Error: 'lora' must be an array of objects with 'id' and 'scale' fields");
        }
    } else {
        params.lora = {};
    }

    // TODO: add more sanity checks for the input parameters

    if (params.sampling.penalty_last_n < -1) {
        throw std::runtime_error("Error: repeat_last_n must be >= -1");
    }

    if (params.sampling.dry_penalty_last_n < -1) {
        throw std::runtime_error("Error: dry_penalty_last_n must be >= -1");
    }

    if (params.sampling.penalty_last_n == -1) {
        // note: should be the slot's context and not the full context, but it's ok
        params.sampling.penalty_last_n = n_ctx_slot;
    }

    if (params.sampling.dry_penalty_last_n == -1) {
        params.sampling.dry_penalty_last_n = n_ctx_slot;
    }

    if (params.sampling.dry_base < 1.0f) {
        params.sampling.dry_base = defaults.sampling.dry_base;
    }

    // sequence breakers for DRY
    {
        // Currently, this is not compatible with TextGen WebUI, Koboldcpp and SillyTavern format
        // Ref: https://github.com/oobabooga/text-generation-webui/blob/d1af7a41ade7bd3c3a463bfa640725edb818ebaf/extensions/openai/typing.py#L39
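        // the accepted format here is a plain JSON array of strings, e.g. "dry_sequence_breakers": ["\n", ":", "\"", "*"]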
        if (data.contains("dry_sequence_breakers")) {
            params.sampling.dry_sequence_breakers = json_value(data, "dry_sequence_breakers", std::vector<std::string>());
            if (params.sampling.dry_sequence_breakers.empty()) {
                throw std::runtime_error("Error: dry_sequence_breakers must be a non-empty array of strings");
            }
        }
    }

    // process "json_schema" and "grammar"
    if (data.contains("json_schema") && !data.contains("grammar")) {
        try {
            auto schema = json_value(data, "json_schema", json::object());
            SRV_DBG("JSON schema: %s\n", schema.dump(2).c_str());
            params.sampling.grammar = json_schema_to_grammar(schema);
            SRV_DBG("Converted grammar: %s\n", params.sampling.grammar.c_str());
        } catch (const std::exception & e) {
            throw std::runtime_error(std::string("\"json_schema\": ") + e.what());
        }
    } else {
        params.sampling.grammar = json_value(data, "grammar", defaults.sampling.grammar);
        SRV_DBG("Grammar: %s\n", params.sampling.grammar.c_str());
        params.sampling.grammar_lazy = json_value(data, "grammar_lazy", defaults.sampling.grammar_lazy);
        SRV_DBG("Grammar lazy: %s\n", params.sampling.grammar_lazy ? "true" : "false");
    }

    {
        auto it = data.find("chat_format");
        if (it != data.end()) {
            params.oaicompat_chat_syntax.format = static_cast<common_chat_format>(it->get<int>());
            SRV_INF("Chat format: %s\n", common_chat_format_name(params.oaicompat_chat_syntax.format));
        } else {
            params.oaicompat_chat_syntax.format = defaults.oaicompat_chat_syntax.format;
        }
        common_reasoning_format reasoning_format = params_base.reasoning_format;
        if (data.contains("reasoning_format")) {
            reasoning_format = common_reasoning_format_from_name(data.at("reasoning_format").get<std::string>());
        }
        params.oaicompat_chat_syntax.reasoning_format = reasoning_format;
        params.oaicompat_chat_syntax.reasoning_in_content = params.stream && (reasoning_format == COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY);
        params.oaicompat_chat_syntax.thinking_forced_open = json_value(data, "thinking_forced_open", false);
        params.oaicompat_chat_syntax.parse_tool_calls = json_value(data, "parse_tool_calls", false);
        if (data.contains("chat_parser")) {
            params.oaicompat_chat_syntax.parser.load(data.at("chat_parser").get<std::string>());
        }
    }

    {
        const auto preserved_tokens = data.find("preserved_tokens");
        if (preserved_tokens != data.end()) {
            for (const auto & t : *preserved_tokens) {
                auto ids = common_tokenize(vocab, t.get<std::string>(), /* add_special= */ false, /* parse_special= */ true);
                if (ids.size() == 1) {
                    SRV_DBG("Preserved token: %d\n", ids[0]);
                    params.sampling.preserved_tokens.insert(ids[0]);
                } else {
                    // This may happen when using a tool call style meant for a model with special tokens to preserve on a model without said tokens.
                    SRV_DBG("Not preserved because more than 1 token: %s\n", t.get<std::string>().c_str());
                }
            }
        }
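
        // grammar trigger words that tokenize to a single (preserved) token are converted to token triggers;
        // everything else is kept as a word/pattern trigger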
        const auto grammar_triggers = data.find("grammar_triggers");
        if (grammar_triggers != data.end()) {
            for (const auto & t : *grammar_triggers) {
                server_grammar_trigger ct(t);
                if (ct.value.type == COMMON_GRAMMAR_TRIGGER_TYPE_WORD) {
                    const auto & word = ct.value.value;
                    auto ids = common_tokenize(vocab, word, /* add_special= */ false, /* parse_special= */ true);
                    if (ids.size() == 1) {
                        auto token = ids[0];
                        if (std::find(params.sampling.preserved_tokens.begin(), params.sampling.preserved_tokens.end(), (llama_token) token) == params.sampling.preserved_tokens.end()) {
                            throw std::runtime_error("Grammar trigger word should be marked as preserved token: " + word);
                        }
                        SRV_DBG("Grammar trigger token: %d (`%s`)\n", token, word.c_str());
                        common_grammar_trigger trigger;
                        trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN;
                        trigger.value = word;
                        trigger.token = token;
                        params.sampling.grammar_triggers.push_back(std::move(trigger));
                    } else {
                        SRV_DBG("Grammar trigger word: `%s`\n", word.c_str());
                        params.sampling.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, word});
                    }
                } else {
                    if (ct.value.type == COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN) {
                        SRV_DBG("Grammar trigger pattern: `%s`\n", ct.value.value.c_str());
                    } else if (ct.value.type == COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL) {
                        SRV_DBG("Grammar trigger pattern full: `%s`\n", ct.value.value.c_str());
                    } else {
                        throw std::runtime_error("Unknown grammar trigger type");
                    }
                    params.sampling.grammar_triggers.emplace_back(std::move(ct.value));
                }
            }
        }
        if (params.sampling.grammar_lazy && params.sampling.grammar_triggers.empty()) {
            throw std::runtime_error("Error: no triggers set for lazy grammar!");
        }
    }

    {
        params.sampling.logit_bias.clear();
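
        // accepted "logit_bias" formats (illustrative examples):
        //   array form:  [[15043, 1.5], ["Hello", -0.5], [15043, false]]  - `false` bans the token (-inf bias)
        //   object form: {"15043": 1.5, "Hello": -0.5}                    - keys are token ids or strings to tokenize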
        const auto & logit_bias = data.find("logit_bias");
        if (logit_bias != data.end() && logit_bias->is_array()) {
            const int n_vocab = llama_vocab_n_tokens(vocab);
            for (const auto & el : *logit_bias) {
                // TODO: we may want to throw errors here, in case "el" is incorrect
                if (el.is_array() && el.size() == 2) {
                    float bias;
                    if (el[1].is_number()) {
                        bias = el[1].get<float>();
                    } else if (el[1].is_boolean() && !el[1].get<bool>()) {
                        bias = -INFINITY;
                    } else {
                        continue;
                    }
                    if (el[0].is_number_integer()) {
                        llama_token tok = el[0].get<llama_token>();
                        if (tok >= 0 && tok < n_vocab) {
                            params.sampling.logit_bias.push_back({tok, bias});
                        }
                    } else if (el[0].is_string()) {
                        auto toks = common_tokenize(vocab, el[0].get<std::string>(), false);
                        for (auto tok : toks) {
                            params.sampling.logit_bias.push_back({tok, bias});
                        }
                    }
                }
            }
        } else if (logit_bias != data.end() && logit_bias->is_object()) {
            const int n_vocab = llama_vocab_n_tokens(vocab);
            for (const auto & el : logit_bias->items()) {
                float bias;
                const auto & key = el.key();
                const auto & value = el.value();
                if (value.is_number()) {
                    bias = value.get<float>();
                } else if (value.is_boolean() && !value.get<bool>()) {
                    bias = -INFINITY;
                } else {
                    continue;
                }
                char * end;
                llama_token tok = strtol(key.c_str(), &end, 10);
                if (*end == 0) {
                    if (tok >= 0 && tok < n_vocab) {
                        params.sampling.logit_bias.push_back({tok, bias});
                    }
                } else {
                    auto toks = common_tokenize(vocab, key, false);
                    for (auto tok : toks) {
                        params.sampling.logit_bias.push_back({tok, bias});
                    }
                }
            }
        }

        params.sampling.ignore_eos = json_value(data, "ignore_eos", params_base.sampling.ignore_eos);
        if (params.sampling.ignore_eos) {
            params.sampling.logit_bias.insert(
                    params.sampling.logit_bias.end(),
                    defaults.sampling.logit_bias_eog.begin(), defaults.sampling.logit_bias_eog.end());
        }
    }

    {
        params.antiprompt.clear();
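
        // "stop" is expected to be an array of non-empty strings, e.g. "stop": ["</s>", "User:"]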
        const auto & stop = data.find("stop");
        if (stop != data.end() && stop->is_array()) {
            for (const auto & word : *stop) {
                if (!word.empty()) {
                    params.antiprompt.push_back(word);
                }
            }
        }
        // set reverse prompt from cli args if not set in the request
        if (params.antiprompt.empty()) {
            params.antiprompt = defaults.antiprompt;
        }
    }

    {
        const auto samplers = data.find("samplers");
        if (samplers != data.end()) {
            if (samplers->is_array()) {
                params.sampling.samplers = common_sampler_types_from_names(*samplers, false);
            } else if (samplers->is_string()) {
                params.sampling.samplers = common_sampler_types_from_chars(samplers->get<std::string>());
            }
        } else {
            params.sampling.samplers = defaults.sampling.samplers;
        }
    }

    if (params.n_cmpl > params_base.n_parallel) {
        throw std::runtime_error("n_cmpl cannot be greater than the number of slots, please increase -np");
    }

    return params;
}
//
// result_timings
//

json result_timings::to_json() const {
    json base = {
        {"cache_n", cache_n},
        {"prompt_n", prompt_n},
        {"prompt_ms", prompt_ms},
        {"prompt_per_token_ms", prompt_per_token_ms},
        {"prompt_per_second", prompt_per_second},
        {"predicted_n", predicted_n},
        {"predicted_ms", predicted_ms},
        {"predicted_per_token_ms", predicted_per_token_ms},
        {"predicted_per_second", predicted_per_second},
    };

    if (draft_n > 0) {
        base["draft_n"] = draft_n;
        base["draft_n_accepted"] = draft_n_accepted;
    }

    return base;
}

//
// result_prompt_progress
//

json result_prompt_progress::to_json() const {
    return json {
        {"total", total},
        {"cache", cache},
        {"processed", processed},
        {"time_ms", time_ms},
    };
}

static inline std::string stop_type_to_str(stop_type type) {
    switch (type) {
        case STOP_TYPE_EOS: return "eos";
        case STOP_TYPE_WORD: return "word";
        case STOP_TYPE_LIMIT: return "limit";
        default: return "none";
    }
}

//
// completion_token_output
//

json completion_token_output::to_json(bool post_sampling_probs) const {
    json probs_for_token = json::array();
    for (const auto & p : probs) {
        std::string txt(p.txt);
        txt.resize(validate_utf8(txt));
        probs_for_token.push_back(json {
            {"id", p.tok},
            {"token", txt},
            {"bytes", str_to_bytes(p.txt)},
            {
                post_sampling_probs ? "prob" : "logprob",
                post_sampling_probs ? p.prob : logarithm(p.prob)
            },
        });
    }
    return probs_for_token;
}

json completion_token_output::probs_vector_to_json(const std::vector<completion_token_output> & probs, bool post_sampling_probs) {
    json out = json::array();
    for (const auto & p : probs) {
        std::string txt(p.text_to_send);
        txt.resize(validate_utf8(txt));
        out.push_back(json {
            {"id", p.tok},
            {"token", txt},
            {"bytes", str_to_bytes(p.text_to_send)},
            {
                post_sampling_probs ? "prob" : "logprob",
                post_sampling_probs ? p.prob : logarithm(p.prob)
            },
            {
                post_sampling_probs ? "top_probs" : "top_logprobs",
                p.to_json(post_sampling_probs)
            },
        });
    }
    return out;
}

float completion_token_output::logarithm(float x) {
    // nlohmann::json converts -inf to null, so we need to prevent that
    return x == 0.0f ? std::numeric_limits<float>::lowest() : std::log(x);
}

std::vector<unsigned char> completion_token_output::str_to_bytes(const std::string & str) {
    std::vector<unsigned char> bytes;
    for (unsigned char c : str) {
        bytes.push_back(c);
    }
    return bytes;
}

//
// server_task_result_cmpl_final
//

json server_task_result_cmpl_final::to_json() {
    GGML_ASSERT(is_updated && "update() must be called before to_json()");
    switch (res_type) {
        case TASK_RESPONSE_TYPE_NONE:
            return to_json_non_oaicompat();
        case TASK_RESPONSE_TYPE_OAI_CMPL:
            return to_json_oaicompat();
        case TASK_RESPONSE_TYPE_OAI_CHAT:
            return stream ? to_json_oaicompat_chat_stream() : to_json_oaicompat_chat();
        case TASK_RESPONSE_TYPE_ANTHROPIC:
            return stream ? to_json_anthropic_stream() : to_json_anthropic();
        default:
            GGML_ASSERT(false && "Invalid task_response_type");
    }
}

json server_task_result_cmpl_final::to_json_non_oaicompat() {
    json res = json {
        {"index", index},
        {"content", content},
        {"tokens", tokens},
        {"id_slot", id_slot},
        {"stop", true},
        {"model", oaicompat_model},
        {"tokens_predicted", n_decoded},
        {"tokens_evaluated", n_prompt_tokens},
        {"generation_settings", generation_params.to_json()},
        {"prompt", prompt},
        {"has_new_line", has_new_line},
        {"truncated", truncated},
        {"stop_type", stop_type_to_str(stop)},
        {"stopping_word", stopping_word},
        {"tokens_cached", n_tokens_cached},
        {"timings", timings.to_json()},
    };
    if (!stream && !probs_output.empty()) {
        res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs);
    }
    return response_fields.empty() ? res : json_get_nested_values(response_fields, res);
}

json server_task_result_cmpl_final::to_json_oaicompat() {
    std::time_t t = std::time(0);

    json logprobs = json(nullptr); // OAI default to null
    if (!stream && probs_output.size() > 0) {
        logprobs = json{
            {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)},
        };
    }

    json finish_reason = "length";
    if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
        finish_reason = "stop";
    }

    json res = json {
        {"choices", json::array({
            json{
                {"text", content},
                {"index", index},
                {"logprobs", logprobs},
                {"finish_reason", finish_reason},
            }
        })},
        {"created", t},
        {"model", oaicompat_model},
        {"system_fingerprint", build_info},
        {"object", "text_completion"},
        {"usage", json {
            {"completion_tokens", n_decoded},
            {"prompt_tokens", n_prompt_tokens},
            {"total_tokens", n_decoded + n_prompt_tokens}
        }},
        {"id", oaicompat_cmpl_id}
    };

    // extra fields for debugging purposes
    if (verbose) {
        res["__verbose"] = to_json_non_oaicompat();
    }
    if (timings.prompt_n >= 0) {
        res.push_back({"timings", timings.to_json()});
    }

    return res;
}

json server_task_result_cmpl_final::to_json_oaicompat_chat() {
    std::string finish_reason = "length";
    common_chat_msg msg;
    if (!oaicompat_msg.empty()) {
        msg = oaicompat_msg;
    } else {
        msg.role = "assistant";
        msg.content = content;
    }
    if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
        finish_reason = msg.tool_calls.empty() ? "stop" : "tool_calls";
    }

    json choice {
        {"finish_reason", finish_reason},
        {"index", index},
        {"message", msg.to_json_oaicompat<json>()},
    };

    if (!stream && probs_output.size() > 0) {
        choice["logprobs"] = json{
            {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)},
        };
    }

    std::time_t t = std::time(0);

    json res = json {
        {"choices", json::array({choice})},
        {"created", t},
        {"model", oaicompat_model},
        {"system_fingerprint", build_info},
        {"object", "chat.completion"},
        {"usage", json {
            {"completion_tokens", n_decoded},
            {"prompt_tokens", n_prompt_tokens},
            {"total_tokens", n_decoded + n_prompt_tokens}
        }},
        {"id", oaicompat_cmpl_id}
    };

    // extra fields for debugging purposes
    if (verbose) {
        res["__verbose"] = to_json_non_oaicompat();
    }
    if (timings.prompt_n >= 0) {
        res.push_back({"timings", timings.to_json()});
    }

    return res;
}
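
// incrementally re-parses the accumulated generated text into a structured chat message and
// reports the changes relative to the previous parse as a list of diffs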
common_chat_msg task_result_state::update_chat_msg(
        const std::string & text_added,
        bool is_partial,
        std::vector<common_chat_msg_diff> & diffs) {
    generated_text += text_added;

    auto msg_prv_copy = chat_msg;

    SRV_DBG("Parsing chat message: %s\n", generated_text.c_str());
    auto new_msg = common_chat_parse(
        generated_text,
        is_partial,
        oaicompat_chat_syntax);
    if (!new_msg.empty()) {
        new_msg.set_tool_call_ids(generated_tool_call_ids, gen_tool_call_id);
        chat_msg = new_msg;
        diffs = common_chat_msg_diff::compute_diffs(msg_prv_copy, new_msg.empty() ? msg_prv_copy : new_msg);
    }
    return chat_msg;
}
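
// the streamed chat response is returned as an array of chunks: one chunk per message diff,
// a final chunk carrying the finish_reason, and (optionally) a trailing usage-only chunk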
json server_task_result_cmpl_final::to_json_oaicompat_chat_stream() {
    std::time_t t = std::time(0);
    std::string finish_reason = "length";
    if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
        finish_reason = oaicompat_msg.tool_calls.empty() ? "stop" : "tool_calls";
    }

    json deltas = json::array();
    for (const auto & diff : oaicompat_msg_diffs) {
        deltas.push_back({
            {"choices", json::array({
                json {
                    {"finish_reason", nullptr},
                    {"index", 0},
                    {"delta", common_chat_msg_diff_to_json_oaicompat<json>(diff)},
                },
            })},
            {"created", t},
            {"id", oaicompat_cmpl_id},
            {"model", oaicompat_model},
            {"system_fingerprint", build_info},
            {"object", "chat.completion.chunk"},
        });
    }

    deltas.push_back({
        {"choices", json::array({
            json {
                {"finish_reason", finish_reason},
                {"index", 0},
                {"delta", json::object()},
            },
        })},
        {"created", t},
        {"id", oaicompat_cmpl_id},
        {"model", oaicompat_model},
        {"system_fingerprint", build_info},
        {"object", "chat.completion.chunk"},
    });

    if (include_usage) {
        // OpenAI API spec for chat.completion.chunks specifies an empty `choices` array for the last chunk when including usage
        // https://platform.openai.com/docs/api-reference/chat_streaming/streaming#chat_streaming/streaming-choices
        deltas.push_back({
            {"choices", json::array()},
            {"created", t},
            {"id", oaicompat_cmpl_id},
            {"model", oaicompat_model},
            {"system_fingerprint", build_info},
            {"object", "chat.completion.chunk"},
            {"usage", json {
                {"completion_tokens", n_decoded},
                {"prompt_tokens", n_prompt_tokens},
                {"total_tokens", n_decoded + n_prompt_tokens},
            }},
        });
    }

    if (timings.prompt_n >= 0) {
        deltas.back().push_back({"timings", timings.to_json()});
    }

    // extra fields for debugging purposes
    if (verbose && !deltas.empty()) {
        deltas.front()["__verbose"] = to_json_non_oaicompat();
    }

    return deltas;
}
json server_task_result_cmpl_final::to_json_anthropic() {
    std::string stop_reason = "max_tokens";
    if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
        stop_reason = oaicompat_msg.tool_calls.empty() ? "end_turn" : "tool_use";
    }

    json content_blocks = json::array();

    common_chat_msg msg;
    if (!oaicompat_msg.empty()) {
        msg = oaicompat_msg;
    } else {
        msg.role = "assistant";
        msg.content = content;
    }

    // thinking block comes first (Anthropic extended thinking format)
    if (!msg.reasoning_content.empty()) {
        content_blocks.push_back({
            {"type", "thinking"},
            {"thinking", msg.reasoning_content},
            {"signature", ""} // empty signature for local models (no cryptographic verification)
        });
    }

    if (!msg.content.empty()) {
        content_blocks.push_back({
            {"type", "text"},
            {"text", msg.content}
        });
    }

    for (const auto & tool_call : msg.tool_calls) {
        json tool_use_block = {
            {"type", "tool_use"},
            {"id", tool_call.id},
            {"name", tool_call.name}
        };
        try {
            tool_use_block["input"] = json::parse(tool_call.arguments);
        } catch (const std::exception &) {
            tool_use_block["input"] = json::object();
        }
        content_blocks.push_back(tool_use_block);
    }

    json res = {
        {"id", oaicompat_cmpl_id},
        {"type", "message"},
        {"role", "assistant"},
        {"content", content_blocks},
        {"model", oaicompat_model},
        {"stop_reason", stop_reason},
        {"stop_sequence", stopping_word.empty() ? nullptr : json(stopping_word)},
        {"usage", {
            {"input_tokens", n_prompt_tokens},
            {"output_tokens", n_decoded}
        }}
    };

    return res;
}
json server_task_result_cmpl_final::to_json_anthropic_stream() {
    json events = json::array();
    std::string stop_reason = "max_tokens";
    if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
        stop_reason = oaicompat_msg.tool_calls.empty() ? "end_turn" : "tool_use";
    }

    bool has_thinking = !oaicompat_msg.reasoning_content.empty();
    bool has_text = !oaicompat_msg.content.empty();
    size_t num_tool_calls = oaicompat_msg.tool_calls.size();

    // content block indices: thinking (0) -> text (0 or 1) -> tool_use (n+)
    size_t thinking_block_index = 0;
    size_t text_block_index = has_thinking ? 1 : 0;

    bool thinking_block_started = false;
    bool text_block_started = false;
    std::unordered_set<size_t> tool_calls_started;

    for (const auto & diff : oaicompat_msg_diffs) {
        // handle thinking/reasoning content
        if (!diff.reasoning_content_delta.empty()) {
            if (!thinking_block_started) {
                events.push_back({
                    {"event", "content_block_start"},
                    {"data", {
                        {"type", "content_block_start"},
                        {"index", thinking_block_index},
                        {"content_block", {
                            {"type", "thinking"},
                            {"thinking", ""}
                        }}
                    }}
                });
                thinking_block_started = true;
            }
            events.push_back({
                {"event", "content_block_delta"},
                {"data", {
                    {"type", "content_block_delta"},
                    {"index", thinking_block_index},
                    {"delta", {
                        {"type", "thinking_delta"},
                        {"thinking", diff.reasoning_content_delta}
                    }}
                }}
            });
        }

        // handle regular text content
        if (!diff.content_delta.empty()) {
            if (!text_block_started) {
                events.push_back({
                    {"event", "content_block_start"},
                    {"data", {
                        {"type", "content_block_start"},
                        {"index", text_block_index},
                        {"content_block", {
                            {"type", "text"},
                            {"text", ""}
                        }}
                    }}
                });
                text_block_started = true;
            }
            events.push_back({
                {"event", "content_block_delta"},
                {"data", {
                    {"type", "content_block_delta"},
                    {"index", text_block_index},
                    {"delta", {
                        {"type", "text_delta"},
                        {"text", diff.content_delta}
                    }}
                }}
            });
        }

        // handle tool calls
        if (diff.tool_call_index != std::string::npos) {
            size_t content_block_index = (has_thinking ? 1 : 0) + (has_text ? 1 : 0) + diff.tool_call_index;
            if (tool_calls_started.find(diff.tool_call_index) == tool_calls_started.end()) {
                const auto & full_tool_call = oaicompat_msg.tool_calls[diff.tool_call_index];
                events.push_back({
                    {"event", "content_block_start"},
                    {"data", {
                        {"type", "content_block_start"},
                        {"index", content_block_index},
                        {"content_block", {
                            {"type", "tool_use"},
                            {"id", full_tool_call.id},
                            {"name", full_tool_call.name}
                        }}
                    }}
                });
                tool_calls_started.insert(diff.tool_call_index);
            }
            if (!diff.tool_call_delta.arguments.empty()) {
                events.push_back({
                    {"event", "content_block_delta"},
                    {"data", {
                        {"type", "content_block_delta"},
                        {"index", content_block_index},
                        {"delta", {
                            {"type", "input_json_delta"},
                            {"partial_json", diff.tool_call_delta.arguments}
                        }}
                    }}
                });
            }
        }
    }

    // close content blocks in order
    if (has_thinking) {
        // Anthropic API requires a signature_delta before closing thinking blocks
        // We use an empty signature since we can't generate a cryptographic signature for local models
        events.push_back({
            {"event", "content_block_delta"},
            {"data", {
                {"type", "content_block_delta"},
                {"index", thinking_block_index},
                {"delta", {
                    {"type", "signature_delta"},
                    {"signature", ""}
                }}
            }}
        });
        events.push_back({
            {"event", "content_block_stop"},
            {"data", {
                {"type", "content_block_stop"},
                {"index", thinking_block_index}
            }}
        });
    }

    if (has_text) {
        events.push_back({
            {"event", "content_block_stop"},
            {"data", {
                {"type", "content_block_stop"},
                {"index", text_block_index}
            }}
        });
    }

    for (size_t i = 0; i < num_tool_calls; i++) {
        size_t content_block_index = (has_thinking ? 1 : 0) + (has_text ? 1 : 0) + i;
        events.push_back({
            {"event", "content_block_stop"},
            {"data", {
                {"type", "content_block_stop"},
                {"index", content_block_index}
            }}
        });
    }

    events.push_back({
        {"event", "message_delta"},
        {"data", {
            {"type", "message_delta"},
            {"delta", {
                {"stop_reason", stop_reason},
                {"stop_sequence", stopping_word.empty() ? nullptr : json(stopping_word)}
            }},
            {"usage", {
                {"output_tokens", n_decoded}
            }}
        }}
    });

    events.push_back({
        {"event", "message_stop"},
        {"data", {
            {"type", "message_stop"}
        }}
    });

    return events;
}
//
// server_task_result_cmpl_partial
//

json server_task_result_cmpl_partial::to_json() {
    GGML_ASSERT(is_updated && "update() must be called before to_json()");
    switch (res_type) {
        case TASK_RESPONSE_TYPE_NONE:
            return to_json_non_oaicompat();
        case TASK_RESPONSE_TYPE_OAI_CMPL:
            return to_json_oaicompat();
        case TASK_RESPONSE_TYPE_OAI_CHAT:
            return to_json_oaicompat_chat();
        case TASK_RESPONSE_TYPE_ANTHROPIC:
            return to_json_anthropic();
        default:
            GGML_ASSERT(false && "Invalid task_response_type");
    }
}

json server_task_result_cmpl_partial::to_json_non_oaicompat() {
    // non-OAI-compat JSON
    json res = json {
        {"index", index},
        {"content", content},
        {"tokens", tokens},
        {"stop", false},
        {"id_slot", id_slot},
        {"tokens_predicted", n_decoded},
        {"tokens_evaluated", n_prompt_tokens},
    };
    // populate the timings object when needed (usually for the last response or with timings_per_token enabled)
    if (timings.prompt_n > 0) {
        res.push_back({"timings", timings.to_json()});
    }
    if (is_progress) {
        res.push_back({"prompt_progress", progress.to_json()});
    }
    if (!prob_output.probs.empty()) {
        res["completion_probabilities"] = completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs);
    }
    return res;
}

json server_task_result_cmpl_partial::to_json_oaicompat() {
    std::time_t t = std::time(0);
    json logprobs = json(nullptr); // OAI default to null
    if (prob_output.probs.size() > 0) {
        logprobs = json{
            {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)},
        };
    }
    json res = json {
        {"choices", json::array({
            json{
                {"text", content},
                {"index", index},
                {"logprobs", logprobs},
                {"finish_reason", nullptr},
            }
        })},
        {"created", t},
        {"model", oaicompat_model},
        {"system_fingerprint", build_info},
        {"object", "text_completion"},
        {"id", oaicompat_cmpl_id}
    };

    // extra fields for debugging purposes
    if (verbose) {
        res["__verbose"] = to_json_non_oaicompat();
    }
    if (timings.prompt_n >= 0) {
        res.push_back({"timings", timings.to_json()});
    }
    if (is_progress) {
        res.push_back({"prompt_progress", progress.to_json()});
    }

    return res;
}

json server_task_result_cmpl_partial::to_json_oaicompat_chat() {
    bool first = n_decoded == 1;
    std::time_t t = std::time(0);
    json choices;

    std::vector<json> deltas;
    auto add_delta = [&](const json & delta) {
        deltas.push_back({
            {"choices", json::array({
                json {
                    {"finish_reason", nullptr},
                    {"index", index},
                    {"delta", delta},
                },
            })},
            {"created", t},
            {"id", oaicompat_cmpl_id},
            {"model", oaicompat_model},
            {"system_fingerprint", build_info},
            {"object", "chat.completion.chunk"},
        });
    };

    // We have to send an initial update to conform to openai behavior
    if (first || is_progress) {
        add_delta({
            {"role", "assistant"},
            {"content", nullptr},
        });
    }

    for (const auto & diff : oaicompat_msg_diffs) {
        add_delta(common_chat_msg_diff_to_json_oaicompat<json>(diff));
    }

    if (!deltas.empty()) {
        auto & last_json = deltas[deltas.size() - 1];
        GGML_ASSERT(last_json.at("choices").size() >= 1);

        if (prob_output.probs.size() > 0) {
            last_json.at("choices").at(0)["logprobs"] = json {
                {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)},
            };
        }

        if (timings.prompt_n >= 0) {
            last_json.push_back({"timings", timings.to_json()});
        }

        if (is_progress) {
            last_json.push_back({"prompt_progress", progress.to_json()});
        }
    }

    return deltas;
}
//
// server_task_result_embd
//

json server_task_result_embd::to_json() {
    return res_type == TASK_RESPONSE_TYPE_OAI_EMBD
        ? to_json_oaicompat()
        : to_json_non_oaicompat();
}

json server_task_result_embd::to_json_non_oaicompat() {
    return json {
        {"index", index},
        {"embedding", embedding},
    };
}

json server_task_result_embd::to_json_oaicompat() {
    return json {
        {"index", index},
        {"embedding", embedding[0]},
        {"tokens_evaluated", n_tokens},
    };
}

//
// server_task_result_rerank
//

json server_task_result_rerank::to_json() {
    return json {
        {"index", index},
        {"score", score},
        {"tokens_evaluated", n_tokens},
    };
}
json server_task_result_cmpl_partial::to_json_anthropic() {
    json events = json::array();
    bool first = (n_decoded == 1);

    // use member variables to track block state across streaming calls
    // (anthropic_thinking_block_started, anthropic_text_block_started)
    if (first) {
        events.push_back({
            {"event", "message_start"},
            {"data", {
                {"type", "message_start"},
                {"message", {
                    {"id", oaicompat_cmpl_id},
                    {"type", "message"},
                    {"role", "assistant"},
                    {"content", json::array()},
                    {"model", oaicompat_model},
                    {"stop_reason", nullptr},
                    {"stop_sequence", nullptr},
                    {"usage", {
                        {"input_tokens", n_prompt_tokens},
                        {"output_tokens", 0}
                    }}
                }}
            }}
        });
    }

    // content block indices: thinking (0) -> text (0 or 1) -> tool_use (n+)
    size_t thinking_block_index = 0;
    // use anthropic_has_reasoning (set in update()) to know if ANY reasoning was generated
    size_t text_block_index = anthropic_has_reasoning ? 1 : 0;

    // use local copies of streaming state (copied from task_result_state in update())
    // these reflect the state BEFORE this chunk was processed
    bool thinking_started = anthropic_thinking_block_started;
    bool text_started = anthropic_text_block_started;

    for (const auto & diff : oaicompat_msg_diffs) {
        // handle thinking/reasoning content
        if (!diff.reasoning_content_delta.empty()) {
            if (!thinking_started) {
                events.push_back({
                    {"event", "content_block_start"},
                    {"data", {
                        {"type", "content_block_start"},
                        {"index", thinking_block_index},
                        {"content_block", {
                            {"type", "thinking"},
                            {"thinking", ""}
                        }}
                    }}
                });
                thinking_started = true;
            }
            events.push_back({
                {"event", "content_block_delta"},
                {"data", {
                    {"type", "content_block_delta"},
                    {"index", thinking_block_index},
                    {"delta", {
                        {"type", "thinking_delta"},
                        {"thinking", diff.reasoning_content_delta}
                    }}
                }}
            });
        }

        // handle regular text content
        if (!diff.content_delta.empty()) {
            if (!text_started) {
                events.push_back({
                    {"event", "content_block_start"},
                    {"data", {
                        {"type", "content_block_start"},
                        {"index", text_block_index},
                        {"content_block", {
                            {"type", "text"},
                            {"text", ""}
                        }}
                    }}
                });
                text_started = true;
            }
            events.push_back({
                {"event", "content_block_delta"},
                {"data", {
                    {"type", "content_block_delta"},
                    {"index", text_block_index},
                    {"delta", {
                        {"type", "text_delta"},
                        {"text", diff.content_delta}
                    }}
                }}
            });
        }

        // handle tool calls
        if (diff.tool_call_index != std::string::npos) {
            // use anthropic_has_reasoning for thinking block count (persists across calls)
            size_t content_block_index = (anthropic_has_reasoning ? 1 : 0) + (text_started ? 1 : 0) + diff.tool_call_index;
            if (!diff.tool_call_delta.name.empty()) {
                events.push_back({
                    {"event", "content_block_start"},
                    {"data", {
                        {"type", "content_block_start"},
                        {"index", content_block_index},
                        {"content_block", {
                            {"type", "tool_use"},
                            {"id", diff.tool_call_delta.id},
                            {"name", diff.tool_call_delta.name}
                        }}
                    }}
                });
            }
            if (!diff.tool_call_delta.arguments.empty()) {
                events.push_back({
                    {"event", "content_block_delta"},
                    {"data", {
                        {"type", "content_block_delta"},
                        {"index", content_block_index},
                        {"delta", {
                            {"type", "input_json_delta"},
                            {"partial_json", diff.tool_call_delta.arguments}
                        }}
                    }}
                });
            }
        }
    }

    return events;
}
//
// server_task_result_error
//

json server_task_result_error::to_json() {
    json res = format_error_response(err_msg, err_type);
    if (err_type == ERROR_TYPE_EXCEED_CONTEXT_SIZE) {
        res["n_prompt_tokens"] = n_prompt_tokens;
        res["n_ctx"] = n_ctx;
    }
    return res;
}

//
// server_task_result_metrics
//

json server_task_result_metrics::to_json() {
    return json {
        { "idle", n_idle_slots },
        { "processing", n_processing_slots },
        { "deferred", n_tasks_deferred },
        { "t_start", t_start },
        { "n_prompt_tokens_processed_total", n_prompt_tokens_processed_total },
        { "t_tokens_generation_total", t_tokens_generation_total },
        { "n_tokens_predicted_total", n_tokens_predicted_total },
        { "t_prompt_processing_total", t_prompt_processing_total },
        { "n_tokens_max", n_tokens_max },
        { "n_prompt_tokens_processed", n_prompt_tokens_processed },
        { "t_prompt_processing", t_prompt_processing },
        { "n_tokens_predicted", n_tokens_predicted },
        { "t_tokens_generation", t_tokens_generation },
        { "n_decode_total", n_decode_total },
        { "n_busy_slots_total", n_busy_slots_total },
        { "slots", slots_data },
    };
}

//
// server_task_result_slot_save_load
//

json server_task_result_slot_save_load::to_json() {
    if (is_save) {
        return json {
            { "id_slot", id_slot },
            { "filename", filename },
            { "n_saved", n_tokens },
            { "n_written", n_bytes },
            { "timings", {
                { "save_ms", t_ms }
            }},
        };
    }
    return json {
        { "id_slot", id_slot },
        { "filename", filename },
        { "n_restored", n_tokens },
        { "n_read", n_bytes },
        { "timings", {
            { "restore_ms", t_ms }
        }},
    };
}

//
// server_task_result_slot_erase
//

json server_task_result_slot_erase::to_json() {
    return json {
        { "id_slot", id_slot },
        { "n_erased", n_erased },
    };
}

//
// server_task_result_get_lora
//

json server_task_result_get_lora::to_json() {
    json result = json::array();
    for (size_t i = 0; i < loras.size(); ++i) {
        auto & lora = loras[i];
        json entry = {
            {"id", i},
            {"path", lora.info.path},
            {"scale", lora.info.scale},
            {"task_name", lora.info.task_name},
            {"prompt_prefix", lora.info.prompt_prefix},
        };
        if (!lora.alora_invocation_tokens.empty()) {
            entry["alora_invocation_string"] = lora.alora_invocation_string;
            entry["alora_invocation_tokens"] = lora.alora_invocation_tokens;
        }
        result.push_back(std::move(entry));
    }
    return result;
}

//
// server_task_result_apply_lora
//

json server_task_result_apply_lora::to_json() {
    return json {{ "success", true }};
}

//
// server_prompt_cache
//

size_t server_prompt_cache::size() const {
    size_t res = 0;
    for (const auto & state : states) {
        res += state.size();
    }
    return res;
}

size_t server_prompt_cache::n_tokens() const {
    size_t res = 0;
    for (const auto & state : states) {
        res += state.n_tokens();
    }
    return res;
}
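
// tries to add a copy of the given prompt (and its llama state of `state_size` bytes) to the cache;
// returns nullptr if the prompt is already cached or the state buffer cannot be allocated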
server_prompt * server_prompt_cache::alloc(const server_prompt & prompt, size_t state_size) {
    // first check if the current state is contained fully in the cache
    for (auto it = states.begin(); it != states.end(); ++it) {
        const int cur_lcp_len = it->tokens.get_common_prefix(prompt.tokens);
        if (cur_lcp_len == (int) prompt.tokens.size()) {
            SRV_WRN("%s", " - prompt is already in the cache, skipping\n");
            return nullptr;
        }
    }

    // next, remove any cached prompts that are fully contained in the current prompt
    for (auto it = states.begin(); it != states.end();) {
        const int len = it->tokens.get_common_prefix(prompt.tokens);
        if (len == (int) it->tokens.size()) {
            SRV_WRN(" - removing obsolete cached prompt with length %d\n", len);
            it = states.erase(it);
        } else {
            ++it;
        }
    }

    std::vector<uint8_t> state_data;

    // check if we can allocate enough memory for the new state
    try {
        state_data.resize(state_size);
    } catch (const std::bad_alloc & e) {
        SRV_ERR("failed to allocate memory for prompt cache state: %s\n", e.what());
        limit_size = std::max<size_t>(1, 0.4*size());
        SRV_WRN(" - cache size limit reduced to %.3f MiB\n", limit_size / (1024.0 * 1024.0));
        update();
        return nullptr;
    }

    // TODO: for some reason we can't copy server_tokens, so we have to do this workaround
    auto & cur = states.emplace_back();
    cur = {
        /*.tokens      =*/ server_tokens(prompt.tokens.get_text_tokens(), false),
        /*.data        =*/ std::move(state_data),
        /*.checkpoints =*/ prompt.checkpoints,
    };

    return &cur;
}
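
// looks for a cached prompt that shares a longer common prefix with `tokens_new` than the current
// one (while keeping enough of its own context) and, if found, restores its llama state into the slot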
bool server_prompt_cache::load(server_prompt & prompt, const server_tokens & tokens_new, llama_context * ctx, int32_t id_slot) {
    const int lcp_best = prompt.tokens.get_common_prefix(tokens_new);

    float f_keep_best = float(lcp_best) / prompt.tokens.size();
    float sim_best = float(lcp_best) / tokens_new.size();

    SRV_WRN(" - looking for better prompt, base f_keep = %.3f, sim = %.3f\n", f_keep_best, sim_best);

    auto it_best = states.end();

    // find the most similar cached prompt, that would also preserve the most context
    for (auto it = states.begin(); it != states.end(); ++it) {
        const int lcp_cur = it->tokens.get_common_prefix(tokens_new);

        const float f_keep_cur = float(lcp_cur) / it->tokens.size();
        const float sim_cur = float(lcp_cur) / tokens_new.size();

        // don't trash large prompts
        if (f_keep_cur < 0.25f) {
            continue;
        }

        if (f_keep_best < f_keep_cur && sim_best < sim_cur) {
            f_keep_best = f_keep_cur;
            sim_best = sim_cur;

            it_best = it;
        }
    }

    if (it_best != states.end()) {
        SRV_WRN(" - found better prompt with f_keep = %.3f, sim = %.3f\n", f_keep_best, sim_best);

        const size_t size = it_best->data.size();
        const size_t n = llama_state_seq_set_data_ext(ctx, it_best->data.data(), size, id_slot, 0);
        if (n != size) {
            SRV_WRN("failed to restore state with size %zu\n", size);
            return false;
        }

        it_best->data.clear();
        it_best->data.shrink_to_fit();

        prompt = std::move(*it_best);

        states.erase(it_best);
    }

    return true;
}
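
// enforces the cache limits: evicts the oldest entries while the total size exceeds `limit_size` or the
// total token count exceeds the (dynamically estimated) token limit, always keeping at least one entry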
void server_prompt_cache::update() {
    if (limit_size > 0) {
        // always keep at least one state, regardless of the limits
        while (states.size() > 1 && size() > limit_size) {
            if (states.empty()) {
                break;
            }
            SRV_WRN(" - cache size limit reached, removing oldest entry (size = %.3f MiB)\n", states.front().size() / (1024.0 * 1024.0));
            states.pop_front();
        }
    }

    // average size per token
    const float size_per_token = std::max<float>(1.0f, float(size()) / (std::max<size_t>(1, n_tokens())));

    // dynamically increase the token limit if it can fit in the memory limit
    const size_t limit_tokens_cur = limit_size > 0 ? std::max<size_t>(limit_tokens, limit_size/size_per_token) : limit_tokens;

    if (limit_tokens > 0) {
        while (states.size() > 1 && n_tokens() > limit_tokens_cur) {
            if (states.empty()) {
                break;
            }
            SRV_WRN(" - cache token limit (%zu, est: %zu) reached, removing oldest entry (size = %.3f MiB)\n",
                    limit_tokens, limit_tokens_cur, states.front().size() / (1024.0 * 1024.0));
            states.pop_front();
        }
    }

    SRV_WRN(" - cache state: %zu prompts, %.3f MiB (limits: %.3f MiB, %zu tokens, %zu est)\n",
            states.size(), size() / (1024.0 * 1024.0), limit_size / (1024.0 * 1024.0), limit_tokens, limit_tokens_cur);

    for (const auto & state : states) {
        SRV_WRN(" - prompt %p: %7d tokens, checkpoints: %2zu, %9.3f MiB\n",
                (const void *)&state, state.n_tokens(), state.checkpoints.size(), state.size() / (1024.0 * 1024.0));
    }
}