// chat.cpp
#include "chat.h"
#include "chat-parser.h"
#include "chat-peg-parser.h"
#include "common.h"
#include "json-partial.h"
#include "json-schema-to-grammar.h"
#include "log.h"
#include "regex-partial.h"

#include <minja/chat-template.hpp>
#include <minja/minja.hpp>

#include <algorithm>
#include <cctype>
#include <chrono>
#include <cstdio>
#include <cstring>
#include <exception>
#include <functional>
#include <iomanip>
#include <iostream>
#include <map>
#include <optional>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

using json = nlohmann::ordered_json;

static std::string format_time(const std::chrono::system_clock::time_point & now, const std::string & format) {
    auto time       = std::chrono::system_clock::to_time_t(now);
    auto local_time = *std::localtime(&time);
    std::ostringstream ss;
    ss << std::put_time(&local_time, format.c_str());
    auto res = ss.str();
    return res;
}
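// Illustrative example: std::put_time accepts strftime-style specifiers, so
//   format_time(std::chrono::system_clock::now(), "%d %b %Y")
// might yield "05 Feb 2025", depending on the current local time and locale.
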
static std::string string_diff(const std::string & last, const std::string & current) {
    if (last.empty()) {
        return current;
    }
    if (!string_starts_with(current, last)) {
        if (string_starts_with(last, current)) {
            // This happens if the last generation ended on a partial stop word (not erased),
            // and the current ended on a stop word (erased).
            return "";
        }
        throw std::runtime_error("Invalid diff: '" + last + "' not found at start of '" + current + "'");
    }
    return current.substr(last.size());
}
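// Illustrative examples:
//   string_diff("Hello", "Hello, world") == ", world"
//   string_diff("",      "Hi")           == "Hi"
//   string_diff("Hello, wor", "Hello")   == ""   (previous generation ended on a partial stop word)
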
static bool has_content_or_tool_calls(const common_chat_msg & msg) {
    return !msg.content.empty() || !msg.tool_calls.empty();
}
template <>
json common_chat_msg::to_json_oaicompat() const
{
    json message {
        {"role", "assistant"},
    };
    if (!reasoning_content.empty()) {
        message["reasoning_content"] = reasoning_content;
    }
    if (content.empty() && !tool_calls.empty()) {
        message["content"] = json();
    } else {
        message["content"] = content;
    }
    if (!tool_calls.empty()) {
        auto arr = json::array();
        for (const auto & tc : tool_calls) {
            arr.push_back({
                {"type", "function"},
                {"function", {
                    {"name", tc.name},
                    {"arguments", tc.arguments},
                }},
                {"id", tc.id},
                // Some templates generate and require an id (sometimes in a very specific format, e.g. Mistral Nemo).
                // We only generate a random id for the ones that don't generate one by themselves
                // (they also won't get to see it as their template likely doesn't use it, so it's all for the client)
                // {"id", tc.id.empty() ? gen_tool_call_id() : tc.id},
            });
        }
        message["tool_calls"] = arr;
    }
    return message;
}
std::vector<common_chat_msg_diff> common_chat_msg_diff::compute_diffs(const common_chat_msg & msg_prv, const common_chat_msg & msg_new) {
    std::vector<common_chat_msg_diff> diffs;
    if (msg_new.tool_calls.size() > msg_prv.tool_calls.size()) {
        diffs.reserve(msg_new.tool_calls.size() - msg_prv.tool_calls.size() + 3);
    } else {
        diffs.reserve(3);
    }
    // TODO: these can become expensive for long messages - how to optimize?
    if (msg_prv.reasoning_content != msg_new.reasoning_content) {
        auto & diff = diffs.emplace_back();
        diff.reasoning_content_delta = string_diff(msg_prv.reasoning_content, msg_new.reasoning_content);
    }
    if (msg_prv.content != msg_new.content) {
        auto & diff = diffs.emplace_back();
        diff.content_delta = string_diff(msg_prv.content, msg_new.content);
    }
    if (msg_new.tool_calls.size() < msg_prv.tool_calls.size()) {
        throw std::runtime_error("Invalid diff: now finding fewer tool calls!");
    }
    if (!msg_prv.tool_calls.empty()) {
        const auto idx = msg_prv.tool_calls.size() - 1;
        const auto & pref = msg_prv.tool_calls[idx];
        const auto & newf = msg_new.tool_calls[idx];
        if (pref.name != newf.name) {
            throw std::runtime_error("Invalid diff: tool call mismatch!");
        }
        const auto args_diff = string_diff(pref.arguments, newf.arguments);
        if (!args_diff.empty() || pref.id != newf.id) {
            auto & diff = diffs.emplace_back();
            diff.tool_call_index = idx;
            if (pref.id != newf.id) {
                diff.tool_call_delta.id = newf.id;
                diff.tool_call_delta.name = newf.name;
            }
            diff.tool_call_delta.arguments = args_diff;
        }
    }
    for (size_t idx = msg_prv.tool_calls.size(); idx < msg_new.tool_calls.size(); ++idx) {
        auto & diff = diffs.emplace_back();
        diff.tool_call_index = idx;
        diff.tool_call_delta = msg_new.tool_calls[idx];
    }
    return diffs;
}
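// Illustrative example: if the previous partial parse had content "Hel" and the
// new one has "Hello", compute_diffs() yields a single diff with
// content_delta == "lo". Tool calls are diffed the same way: only the last
// previously-seen call can grow (its arguments are diffed), while any newly
// appeared calls are emitted whole.
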
typedef minja::chat_template common_chat_template;

struct common_chat_templates {
    bool add_bos;
    bool add_eos;
    bool has_explicit_template; // Model had builtin template or template override was specified.
    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
    std::unique_ptr<common_chat_template> template_tool_use;
};

struct templates_params {
    json messages;
    json tools;
    common_chat_tool_choice tool_choice;
    json json_schema;
    bool parallel_tool_calls;
    common_reasoning_format reasoning_format;
    bool stream;
    std::string grammar;
    bool add_generation_prompt = true;
    bool enable_thinking = true;
    std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
    json extra_context;
    bool add_bos;
    bool add_eos;
    bool is_inference = true;
};
common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice) {
    if (tool_choice == "auto") {
        return COMMON_CHAT_TOOL_CHOICE_AUTO;
    }
    if (tool_choice == "none") {
        return COMMON_CHAT_TOOL_CHOICE_NONE;
    }
    if (tool_choice == "required") {
        return COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    }
    throw std::invalid_argument("Invalid tool_choice: " + tool_choice);
}
bool common_chat_templates_support_enable_thinking(const common_chat_templates * chat_templates) {
    common_chat_templates_inputs dummy_inputs;
    common_chat_msg msg;
    msg.role = "user";
    msg.content = "test";
    dummy_inputs.messages = {msg};
    dummy_inputs.enable_thinking = false;
    const auto rendered_no_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
    dummy_inputs.enable_thinking = true;
    const auto rendered_with_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
    return rendered_no_thinking.prompt != rendered_with_thinking.prompt;
}
template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const json & messages) {
    std::vector<common_chat_msg> msgs;

    try {
        if (!messages.is_array()) {
            throw std::invalid_argument("Expected 'messages' to be an array, got " + messages.dump());
        }

        for (const auto & message : messages) {
            if (!message.is_object()) {
                throw std::invalid_argument("Expected 'message' to be an object, got " + message.dump());
            }

            common_chat_msg msg;
            if (!message.contains("role")) {
                throw std::invalid_argument("Missing 'role' in message: " + message.dump());
            }
            msg.role = message.at("role");

            auto has_content = message.contains("content");
            auto has_tool_calls = message.contains("tool_calls");
            if (has_content) {
                const auto & content = message.at("content");
                if (content.is_string()) {
                    msg.content = content;
                } else if (content.is_array()) {
                    for (const auto & part : content) {
                        if (!part.contains("type")) {
                            throw std::invalid_argument("Missing content part type: " + part.dump());
                        }
                        const auto & type = part.at("type");
                        if (type != "text") {
                            throw std::invalid_argument("Unsupported content part type: " + type.dump());
                        }
                        common_chat_msg_content_part msg_part;
                        msg_part.type = type;
                        msg_part.text = part.at("text");
                        msg.content_parts.push_back(msg_part);
                    }
                } else if (!content.is_null()) {
                    throw std::invalid_argument("Invalid 'content' type: expected string or array, got " + content.dump() + " (ref: https://github.com/ggml-org/llama.cpp/issues/8367)");
                }
            }
            if (has_tool_calls) {
                for (const auto & tool_call : message.at("tool_calls")) {
                    common_chat_tool_call tc;
                    if (!tool_call.contains("type")) {
                        throw std::invalid_argument("Missing tool call type: " + tool_call.dump());
                    }
                    const auto & type = tool_call.at("type");
                    if (type != "function") {
                        throw std::invalid_argument("Unsupported tool call type: " + tool_call.dump());
                    }
                    if (!tool_call.contains("function")) {
                        throw std::invalid_argument("Missing tool call function: " + tool_call.dump());
                    }
                    const auto & fc = tool_call.at("function");
                    if (!fc.contains("name")) {
                        throw std::invalid_argument("Missing tool call name: " + tool_call.dump());
                    }
                    tc.name = fc.at("name");
                    tc.arguments = fc.at("arguments");
                    if (tool_call.contains("id")) {
                        tc.id = tool_call.at("id");
                    }
                    msg.tool_calls.push_back(tc);
                }
            }
            if (!has_content && !has_tool_calls) {
                throw std::invalid_argument("Expected 'content' or 'tool_calls' (ref: https://github.com/ggml-org/llama.cpp/issues/8367 & https://github.com/ggml-org/llama.cpp/issues/12279)");
            }
            if (message.contains("reasoning_content")) {
                msg.reasoning_content = message.at("reasoning_content");
            }
            if (message.contains("name")) {
                msg.tool_name = message.at("name");
            }
            if (message.contains("tool_call_id")) {
                msg.tool_call_id = message.at("tool_call_id");
            }

            msgs.push_back(msg);
        }
    } catch (const std::exception & e) {
        // @ngxson : disable otherwise it's bloating the API response
        // printf("%s\n", std::string("; messages = ") + messages.dump(2));
        throw std::runtime_error("Failed to parse messages: " + std::string(e.what()));
    }

    return msgs;
}
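// Illustrative example of a message this parser accepts (note that `arguments`
// carries a stringified JSON object, matching the OpenAI wire format):
//   {"role": "assistant",
//    "content": null,
//    "tool_calls": [{"type": "function", "id": "call0",
//                    "function": {"name": "get_weather", "arguments": "{\"city\":\"Paris\"}"}}]}
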
template <>
json common_chat_msgs_to_json_oaicompat(const std::vector<common_chat_msg> & msgs, bool concat_typed_text) {
    json messages = json::array();
    for (const auto & msg : msgs) {
        if (!msg.content.empty() && !msg.content_parts.empty()) {
            throw std::runtime_error("Cannot specify both content and content_parts");
        }
        json jmsg {
            {"role", msg.role},
        };
        if (!msg.content.empty()) {
            jmsg["content"] = msg.content;
        } else if (!msg.content_parts.empty()) {
            if (concat_typed_text) {
                std::string text;
                for (const auto & part : msg.content_parts) {
                    if (part.type != "text") {
                        LOG_WRN("Ignoring content part type: %s\n", part.type.c_str());
                        continue;
                    }
                    if (!text.empty()) {
                        text += '\n';
                    }
                    text += part.text;
                }
                jmsg["content"] = text;
            } else {
                auto & parts = jmsg["content"] = json::array();
                for (const auto & part : msg.content_parts) {
                    parts.push_back({
                        {"type", part.type},
                        {"text", part.text},
                    });
                }
            }
        } else {
            jmsg["content"] = json(); // null
        }
        if (!msg.reasoning_content.empty()) {
            jmsg["reasoning_content"] = msg.reasoning_content;
        }
        if (!msg.tool_name.empty()) {
            jmsg["name"] = msg.tool_name;
        }
        if (!msg.tool_call_id.empty()) {
            jmsg["tool_call_id"] = msg.tool_call_id;
        }
        if (!msg.tool_calls.empty()) {
            auto & tool_calls = jmsg["tool_calls"] = json::array();
            for (const auto & tool_call : msg.tool_calls) {
                json tc {
                    {"type", "function"},
                    {"function", {
                        {"name", tool_call.name},
                        {"arguments", tool_call.arguments},
                    }},
                };
                if (!tool_call.id.empty()) {
                    tc["id"] = tool_call.id;
                }
                tool_calls.push_back(tc);
            }
        }
        messages.push_back(jmsg);
    }
    return messages;
}
template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const std::string & messages) {
    return common_chat_msgs_parse_oaicompat(json::parse(messages));
}
template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const json & tools) {
    std::vector<common_chat_tool> result;

    try {
        if (!tools.is_null()) {
            if (!tools.is_array()) {
                throw std::invalid_argument("Expected 'tools' to be an array, got " + tools.dump());
            }
            for (const auto & tool : tools) {
                if (!tool.contains("type")) {
                    throw std::invalid_argument("Missing tool type: " + tool.dump());
                }
                const auto & type = tool.at("type");
                if (!type.is_string() || type != "function") {
                    throw std::invalid_argument("Unsupported tool type: " + tool.dump());
                }
                if (!tool.contains("function")) {
                    throw std::invalid_argument("Missing tool function: " + tool.dump());
                }
                const auto & function = tool.at("function");
                result.push_back({
                    /* .name = */        function.at("name"),
                    /* .description = */ function.at("description"),
                    /* .parameters = */  function.at("parameters").dump(),
                });
            }
        }
    } catch (const std::exception & e) {
        throw std::runtime_error("Failed to parse tools: " + std::string(e.what()) + "; tools = " + tools.dump(2));
    }

    return result;
}
template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const std::string & tools) {
    return common_chat_tools_parse_oaicompat(json::parse(tools));
}
template <>
json common_chat_tools_to_json_oaicompat(const std::vector<common_chat_tool> & tools) {
    if (tools.empty()) {
        return json();
    }

    auto result = json::array();
    for (const auto & tool : tools) {
        result.push_back({
            {"type", "function"},
            {"function", {
                {"name", tool.name},
                {"description", tool.description},
                {"parameters", json::parse(tool.parameters)},
            }},
        });
    }
    return result;
}
template <> json common_chat_msg_diff_to_json_oaicompat(const common_chat_msg_diff & diff) {
    json delta = json::object();
    if (!diff.reasoning_content_delta.empty()) {
        delta["reasoning_content"] = diff.reasoning_content_delta;
    }
    if (!diff.content_delta.empty()) {
        delta["content"] = diff.content_delta;
    }
    if (diff.tool_call_index != std::string::npos) {
        json tool_call;
        tool_call["index"] = diff.tool_call_index;
        if (!diff.tool_call_delta.id.empty()) {
            tool_call["id"] = diff.tool_call_delta.id;
            tool_call["type"] = "function";
        }
        json function = json::object();
        if (!diff.tool_call_delta.name.empty()) {
            function["name"] = diff.tool_call_delta.name;
        }
        function["arguments"] = diff.tool_call_delta.arguments;
        tool_call["function"] = function;
        delta["tool_calls"] = json::array({tool_call});
    }
    return delta;
}
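// Illustrative example of a delta emitted while a tool call's arguments stream in,
// matching the `delta` objects of OpenAI-compatible streamed chat completions:
//   {"tool_calls": [{"index": 0, "function": {"arguments": "\"Paris\"}"}}]}
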
bool common_chat_verify_template(const std::string & tmpl, bool use_jinja) {
    if (use_jinja) {
        try {
            common_chat_msg msg;
            msg.role = "user";
            msg.content = "test";

            auto tmpls = common_chat_templates_init(/* model= */ nullptr, tmpl);

            common_chat_templates_inputs inputs;
            inputs.messages = {msg};

            common_chat_templates_apply(tmpls.get(), inputs);
            return true;
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to apply template: %s\n", __func__, e.what());
            return false;
        }
    }
    llama_chat_message chat[] = {{"user", "test"}};
    const int res = llama_chat_apply_template(tmpl.c_str(), chat, 1, true, nullptr, 0);
    return res >= 0;
}
std::string common_chat_format_single(
        const struct common_chat_templates * tmpls,
        const std::vector<common_chat_msg> & past_msg,
        const common_chat_msg & new_msg,
        bool add_ass,
        bool use_jinja) {

    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;
    inputs.add_bos = tmpls->add_bos;
    inputs.add_eos = tmpls->add_eos;

    std::string fmt_past_msg;
    if (!past_msg.empty()) {
        inputs.messages = past_msg;
        inputs.add_generation_prompt = false;
        fmt_past_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    }
    std::ostringstream ss;
    // if the past_msg ends with a newline, we must preserve it in the formatted version
    if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
        ss << "\n";
    }
    // format chat with new_msg
    inputs.messages.push_back(new_msg);
    inputs.add_generation_prompt = add_ass;
    auto fmt_new_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    // get the diff part
    ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return ss.str();
}
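// Illustrative example: with a ChatML template, formatting a new user message
// "Hi" after an existing history returns only the suffix to append, e.g.
//   "<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n"
// (the exact text depends on the active template).
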
std::string common_chat_format_example(const struct common_chat_templates * tmpls, bool use_jinja, const std::map<std::string, std::string> & chat_template_kwargs) {
    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;
    inputs.add_bos = tmpls->add_bos;
    inputs.add_eos = tmpls->add_eos;
    inputs.chat_template_kwargs = chat_template_kwargs;
    auto add_simple_msg = [&](auto role, auto content) {
        common_chat_msg msg;
        msg.role = role;
        msg.content = content;
        inputs.messages.push_back(msg);
    };
    add_simple_msg("system",    "You are a helpful assistant");
    add_simple_msg("user",      "Hello");
    add_simple_msg("assistant", "Hi there");
    add_simple_msg("user",      "How are you?");
    return common_chat_templates_apply(tmpls, inputs).prompt;
}
#define CHATML_TEMPLATE_SRC \
    "{%- for message in messages -%}\n" \
    "  {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' -}}\n" \
    "{%- endfor -%}\n" \
    "{%- if add_generation_prompt -%}\n" \
    "  {{- '<|im_start|>assistant\n' -}}\n" \
    "{%- endif -%}"
void common_chat_templates_free(struct common_chat_templates * tmpls) {
    delete tmpls;
}

bool common_chat_templates_was_explicit(const struct common_chat_templates * tmpls) {
    return tmpls->has_explicit_template;
}

const char * common_chat_templates_source(const struct common_chat_templates * tmpls, const char * variant) {
    if (variant != nullptr) {
        if (strcmp(variant, "tool_use") == 0) {
            if (tmpls->template_tool_use) {
                return tmpls->template_tool_use->source().c_str();
            }
            return nullptr;
        } else {
            LOG_DBG("%s: unknown template variant: %s\n", __func__, variant);
        }
    }
    return tmpls->template_default->source().c_str();
}
common_chat_templates_ptr common_chat_templates_init(
    const struct llama_model * model,
    const std::string & chat_template_override,
    const std::string & bos_token_override,
    const std::string & eos_token_override)
{
    std::string default_template_src;
    std::string template_tool_use_src;

    bool has_explicit_template = !chat_template_override.empty();
    if (chat_template_override.empty()) {
        GGML_ASSERT(model != nullptr);
        const auto * str = llama_model_chat_template(model, /* name */ nullptr);
        if (str) {
            default_template_src = str;
            has_explicit_template = true;
        }
        str = llama_model_chat_template(model, /* name */ "tool_use");
        if (str) {
            template_tool_use_src = str;
            has_explicit_template = true;
        }
    } else {
        default_template_src = chat_template_override;
    }
    if (default_template_src.empty() || default_template_src == "chatml") {
        if (!template_tool_use_src.empty()) {
            default_template_src = template_tool_use_src;
        } else {
            default_template_src = CHATML_TEMPLATE_SRC;
        }
    }

    // TODO @ngxson : this is a temporary hack to prevent chat template from throwing an error
    // Ref: https://github.com/ggml-org/llama.cpp/pull/15230#issuecomment-3173959633
    if (default_template_src.find("<|channel|>") != std::string::npos
            // search for the error message and patch it
            && default_template_src.find("in message.content or") != std::string::npos) {
        string_replace_all(default_template_src,
            "{%- if \"<|channel|>analysis<|message|>\" in message.content or \"<|channel|>final<|message|>\" in message.content %}",
            "{%- if false %}");
    }

    // TODO @aldehir : this is a temporary fix, pending Minja changes
    // Ref: https://github.com/ggml-org/llama.cpp/pull/17713#issuecomment-3631342664
    if (default_template_src.find("[TOOL_CALLS]") != std::string::npos
            // search for the error message and patch it
            && default_template_src.find("if (message['content'] is none or") != std::string::npos) {
        string_replace_all(default_template_src,
            "{%- if (message['content'] is none or message['content'] == '' or message['content']|length == 0) and (message['tool_calls'] is not defined or message['tool_calls'] is none or message['tool_calls']|length == 0) %}",
            "{%- if false %}");
    }

    std::string token_bos = bos_token_override;
    std::string token_eos = eos_token_override;
    bool add_bos = false;
    bool add_eos = false;
    if (model) {
        const auto * vocab = llama_model_get_vocab(model);
        const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
            if (token == LLAMA_TOKEN_NULL) {
                if (default_template_src.find(jinja_variable_name) != std::string::npos
                        || template_tool_use_src.find(jinja_variable_name) != std::string::npos) {
                    LOG_WRN("common_chat_templates_init: warning: vocab does not have a %s token, jinja template won't work as intended.\n", name);
                }
                return std::string();
            }
            return common_token_to_piece(vocab, token, true);
        };
        token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
        token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
        add_bos = llama_vocab_get_add_bos(vocab);
        add_eos = llama_vocab_get_add_eos(vocab);
    }
    common_chat_templates_ptr tmpls(new common_chat_templates());
    tmpls->has_explicit_template = has_explicit_template;
    tmpls->add_bos = add_bos;
    tmpls->add_eos = add_eos;
    try {
        tmpls->template_default = std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos);
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to parse chat template (defaulting to chatml): %s \n", __func__, e.what());
        tmpls->template_default = std::make_unique<minja::chat_template>(CHATML_TEMPLATE_SRC, token_bos, token_eos);
    }
    if (!template_tool_use_src.empty()) {
        try {
            tmpls->template_tool_use = std::make_unique<minja::chat_template>(template_tool_use_src, token_bos, token_eos);
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to parse tool use chat template (ignoring it): %s\n", __func__, e.what());
        }
    }
    return tmpls;
}
const char * common_chat_format_name(common_chat_format format) {
    switch (format) {
        case COMMON_CHAT_FORMAT_CONTENT_ONLY: return "Content-only";
        case COMMON_CHAT_FORMAT_GENERIC: return "Generic";
        case COMMON_CHAT_FORMAT_MISTRAL_NEMO: return "Mistral Nemo";
        case COMMON_CHAT_FORMAT_MAGISTRAL: return "Magistral";
        case COMMON_CHAT_FORMAT_LLAMA_3_X: return "Llama 3.x";
        case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return "Llama 3.x with builtin tools";
        case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "DeepSeek R1";
        case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return "FireFunction v2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1";
        case COMMON_CHAT_FORMAT_DEEPSEEK_V3_1: return "DeepSeek V3.1";
        case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro";
        case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
        case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
        case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
        case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
        case COMMON_CHAT_FORMAT_NEMOTRON_V2: return "Nemotron V2";
        case COMMON_CHAT_FORMAT_APERTUS: return "Apertus";
        case COMMON_CHAT_FORMAT_LFM2_WITH_JSON_TOOLS: return "LFM2 with JSON tools";
        case COMMON_CHAT_FORMAT_MINIMAX_M2: return "MiniMax-M2";
        case COMMON_CHAT_FORMAT_GLM_4_5: return "GLM 4.5";
        case COMMON_CHAT_FORMAT_KIMI_K2: return "Kimi K2";
        case COMMON_CHAT_FORMAT_QWEN3_CODER_XML: return "Qwen3 Coder";
        case COMMON_CHAT_FORMAT_APRIEL_1_5: return "Apriel 1.5";
        case COMMON_CHAT_FORMAT_XIAOMI_MIMO: return "Xiaomi MiMo";
        case COMMON_CHAT_FORMAT_PEG_SIMPLE: return "peg-simple";
        case COMMON_CHAT_FORMAT_PEG_NATIVE: return "peg-native";
        case COMMON_CHAT_FORMAT_PEG_CONSTRUCTED: return "peg-constructed";
        default:
            throw std::runtime_error("Unknown chat format");
    }
}
const char * common_reasoning_format_name(common_reasoning_format format) {
    switch (format) {
        case COMMON_REASONING_FORMAT_NONE: return "none";
        case COMMON_REASONING_FORMAT_AUTO: return "auto";
        case COMMON_REASONING_FORMAT_DEEPSEEK: return "deepseek";
        case COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY: return "deepseek-legacy";
        default:
            throw std::runtime_error("Unknown reasoning format");
    }
}
common_reasoning_format common_reasoning_format_from_name(const std::string & format) {
    if (format == "none") {
        return COMMON_REASONING_FORMAT_NONE;
    }
    if (format == "auto") {
        return COMMON_REASONING_FORMAT_AUTO;
    }
    if (format == "deepseek") {
        return COMMON_REASONING_FORMAT_DEEPSEEK;
    }
    if (format == "deepseek-legacy") {
        return COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY;
    }
    throw std::runtime_error("Unknown reasoning format: " + format);
}
static void foreach_function(const json & tools, const std::function<void(const json &)> & fn) {
    for (const auto & tool : tools) {
        if (!tool.contains("type") || tool.at("type") != "function" || !tool.contains("function")) {
            LOG_INF("Skipping tool without function: %s", tool.dump(2).c_str());
            continue;
        }
        fn(tool);
    }
}
static std::string apply(
    const common_chat_template & tmpl,
    const struct templates_params & inputs,
    const std::optional<json> & messages_override = std::nullopt,
    const std::optional<json> & tools_override = std::nullopt,
    const std::optional<json> & additional_context = std::nullopt)
{
    minja::chat_template_inputs tmpl_inputs;
    tmpl_inputs.messages = messages_override ? *messages_override : inputs.messages;
    if (tools_override) {
        tmpl_inputs.tools = *tools_override;
    } else {
        tmpl_inputs.tools = inputs.tools.empty() ? json() : inputs.tools;
    }
    tmpl_inputs.add_generation_prompt = inputs.add_generation_prompt;
    tmpl_inputs.extra_context = inputs.extra_context;
    tmpl_inputs.extra_context["enable_thinking"] = inputs.enable_thinking;
    if (additional_context) {
        tmpl_inputs.extra_context.merge_patch(*additional_context);
    }
    // TODO: add flag to control date/time, if only for testing purposes.
    // tmpl_inputs.now = std::chrono::system_clock::now();

    minja::chat_template_options tmpl_opts;
    // To avoid double BOS / EOS tokens, we're manually removing beginning / trailing tokens
    // instead of using `chat_template_options.use_bos_token = false`, since these tokens
    // may be needed inside the template / between messages too.
    auto result = tmpl.apply(tmpl_inputs, tmpl_opts);
    if (inputs.add_bos && string_starts_with(result, tmpl.bos_token())) {
        result = result.substr(tmpl.bos_token().size());
    }
    if (inputs.add_eos && string_ends_with(result, tmpl.eos_token())) {
        result = result.substr(0, result.size() - tmpl.eos_token().size());
    }
    return result;
}
static common_chat_params common_chat_params_init_generic(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    auto tool_call_schemas = json::array();
    foreach_function(inputs.tools, [&](const json & tool) {
        const auto & function = tool.at("function");
        auto tool_schema = json {
            {"type", "object"},
            {"properties", {
                {"name", {
                    {"type", "string"},
                    {"const", function.at("name")},
                }},
                {"arguments", function.at("parameters")},
            }},
            {"required", json::array({"name", "arguments"})},
        };
        if (function.contains("description")) {
            tool_schema["description"] = function.at("description");
        }
        if (inputs.parallel_tool_calls) {
            tool_schema.at("properties")["id"] = {
                {"type", "string"},
                {"minLength", 4},
            };
            tool_schema.at("required").push_back("id");
        }
        tool_call_schemas.emplace_back(tool_schema);
    });
    const auto tool_call =
        inputs.parallel_tool_calls
            ? json {
                {"type", "object"},
                {"properties", {
                    {"tool_calls", {
                        {"type", "array"},
                        {"items", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                            {"anyOf", tool_call_schemas},
                        }},
                        {"minItems", 1},
                    }},
                }},
                {"required", json::array({"tool_calls"})},
            }
            : json {
                {"type", "object"},
                {"properties", {
                    {"tool_call", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                        {"anyOf", tool_call_schemas},
                    }},
                }},
                {"required", json::array({"tool_call"})},
            };
    const auto schema =
        inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED
            ? json {
                {"anyOf", json::array({
                    tool_call,
                    {
                        {"type", "object"},
                        {"properties", {
                            {"response", inputs.json_schema.is_null()
                                ? json {{"type", "string"}}
                                : inputs.json_schema
                            },
                        }},
                        {"required", json::array({"response"})},
                    },
                })}
            }
            : tool_call;
    data.grammar_lazy = false;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        builder.add_schema("root", schema);
    });

    auto tweaked_messages = common_chat_template::add_system(
        inputs.messages,
        "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request");

    data.prompt = apply(tmpl, inputs, /* messages_override= */ tweaked_messages);
    data.format = COMMON_CHAT_FORMAT_GENERIC;
    return data;
}
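// With the generic format, the model's entire reply is constrained to JSON of
// one of two shapes (illustrative):
//   {"response": "..."}                                          -- plain answer
//   {"tool_call": {"name": "get_weather", "arguments": {...}}}   -- tool request
// (or {"tool_calls": [...]} when parallel tool calls are enabled).
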
static common_chat_params common_chat_params_init_mistral_nemo(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        auto schemas = json::array();
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            schemas.push_back({
                {"type", "object"},
                {"properties", {
                    // Important note: the model is probably trained to take a JSON stringified arguments value.
                    // It's hard to constrain that for now (while reusing the JSON schema conversion), so we're just expecting a plain object.
                    {"name", {
                        {"type", "string"},
                        {"const", function.at("name")},
                    }},
                    {"arguments", function.at("parameters")},
                    {"id", {
                        {"type", "string"},
                        // Nemo's template expects a 9-character alphanumeric ID.
                        {"pattern", "^[a-zA-Z0-9]{9}$"},
                    }},
                }},
                {"required", json::array({"name", "arguments", "id"})},
            });
        });
        auto schema = json {
            {"type", "array"},
            {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
            {"minItems", 1},
        };
        if (!inputs.parallel_tool_calls) {
            schema["maxItems"] = 1;
        }
        builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema));
    });
    data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"});
    data.preserved_tokens = {
        "[TOOL_CALLS]",
    };
    data.prompt = apply(tmpl, inputs);
    data.format = COMMON_CHAT_FORMAT_MISTRAL_NEMO;
    return data;
}
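// The grammar above accepts model output of the form (illustrative):
//   [TOOL_CALLS][{"name": "get_weather", "arguments": {"city": "Paris"}, "id": "abc123def"}]
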
// Case-insensitive find
static size_t ifind_string(const std::string & haystack, const std::string & needle, size_t pos = 0) {
    auto it = std::search(
        haystack.begin() + pos, haystack.end(),
        needle.begin(), needle.end(),
        [](char a, char b) { return std::tolower(a) == std::tolower(b); }
    );
    return (it == haystack.end()) ? std::string::npos : std::distance(haystack.begin(), it);
}
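// Illustrative example: ifind_string("Force JSON Schema.", "force json schema.") == 0.
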
static common_chat_params common_chat_params_init_lfm2(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    const auto is_json_schema_provided = !inputs.json_schema.is_null();
    const auto is_grammar_provided     = !inputs.grammar.empty();
    const auto are_tools_provided      = inputs.tools.is_array() && !inputs.tools.empty();

    // the logic requires potentially modifying the messages
    auto tweaked_messages = inputs.messages;

    auto replace_json_schema_marker = [](json & messages) -> bool {
        static std::string marker1 = "force json schema.\n";
        static std::string marker2 = "force json schema.";

        if (messages.empty() || messages.at(0).at("role") != "system") {
            return false;
        }

        std::string content = messages.at(0).at("content");

        for (const auto & marker : {marker1, marker2}) {
            const auto pos = ifind_string(content, marker);
            if (pos != std::string::npos) {
                content.replace(pos, marker.length(), "");
                // inject modified content back into the messages
                messages.at(0).at("content") = content;
                return true;
            }
        }

        return false;
    };

    // The LFM2 model does not natively work with JSON, but it can generally understand the tools structure.
    //
    // Example of the pytorch dialog structure:
    // <|startoftext|><|im_start|>system
    // List of tools: <|tool_list_start|>[{"name": "get_candidate_status", "description": "Retrieves the current status of a candidate in the recruitment process", "parameters": {"type": "object", "properties": {"candidate_id": {"type": "string", "description": "Unique identifier for the candidate"}}, "required": ["candidate_id"]}}]<|tool_list_end|><|im_end|>
    // <|im_start|>user
    // What is the current status of candidate ID 12345?<|im_end|>
    // <|im_start|>assistant
    // <|tool_call_start|>[get_candidate_status(candidate_id="12345")]<|tool_call_end|>Checking the current status of candidate ID 12345.<|im_end|>
    // <|im_start|>tool
    // <|tool_response_start|>{"candidate_id": "12345", "status": "Interview Scheduled", "position": "Clinical Research Associate", "date": "2023-11-20"}<|tool_response_end|><|im_end|>
    // <|im_start|>assistant
    // The candidate with ID 12345 is currently in the "Interview Scheduled" stage for the position of Clinical Research Associate, with an interview date set for 2023-11-20.<|im_end|>
    //
    // For compatibility with the llama server's JSON tools semantics, the client can add a
    // "force json schema." line to the system prompt to force JSON output (see the markers above).
    //
    if (are_tools_provided && (is_json_schema_provided || is_grammar_provided)) {
        // server/utils.hpp prohibits that branch for the custom grammar anyways
        throw std::runtime_error("Tools call must not use \"json_schema\" or \"grammar\", use non-tool invocation if you want to use custom grammar");
    } else if (are_tools_provided && replace_json_schema_marker(tweaked_messages)) {
        LOG_INF("%s: Using tools to build a grammar\n", __func__);

        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            auto schemas = json::array();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                schemas.push_back({
                    {"type", "object"},
                    {"properties", {
                        {"name", {
                            {"type", "string"},
                            {"const", function.at("name")},
                        }},
                        {"arguments", function.at("parameters")},
                    }},
                    {"required", json::array({"name", "arguments", "id"})},
                });
            });
            auto schema = json {
                {"type", "array"},
                {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
                {"minItems", 1},
            };
            if (!inputs.parallel_tool_calls) {
                schema["maxItems"] = 1;
            }
            builder.add_rule("root", "\"<|tool_call_start|>\"" + builder.add_schema("tool_calls", schema) + "\"<|tool_call_end|>\"");
        });

        // model has no concept of tool selection mode choice,
        // if the system prompt rendered correctly it will produce a tool call
        // the grammar goes inside the tool call body
        data.grammar_lazy = true;
        data.grammar_triggers = {{COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL, "\\s*<\\|tool_call_start\\|>\\s*\\["}};
        data.preserved_tokens = {"<|tool_call_start|>", "<|tool_call_end|>"};
        data.format = COMMON_CHAT_FORMAT_LFM2_WITH_JSON_TOOLS;
    } else if (are_tools_provided && (!is_json_schema_provided && !is_grammar_provided)) {
        LOG_INF("%s: Using tools without json schema or grammar\n", __func__);
        // output those tokens
        data.preserved_tokens = {"<|tool_call_start|>", "<|tool_call_end|>"};
    } else if (is_json_schema_provided) {
        LOG_INF("%s: Using provided json schema to build a grammar\n", __func__);
        data.grammar = json_schema_to_grammar(inputs.json_schema);
    } else if (is_grammar_provided) {
        LOG_INF("%s: Using provided grammar\n", __func__);
        data.grammar = inputs.grammar;
    } else {
        LOG_INF("%s: Using content relying on the template\n", __func__);
    }

    data.prompt = apply(tmpl, inputs, /* messages_override= */ tweaked_messages);
    LOG_DBG("%s: Prompt: %s\n", __func__, data.prompt.c_str());

    return data;
}
static common_chat_params common_chat_params_init_ministral_3(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    // Build up messages to follow the format: https://huggingface.co/mistralai/Ministral-3-14B-Reasoning-2512/blob/main/chat_template.jinja
    auto adjusted_messages = json::array();
    for (const auto & msg : inputs.messages) {
        auto role = msg.value("role", "");
        if (role != "system" && role != "assistant") {
            // Only adjust system and assistant messages. Interestingly, the system message may contain thinking.
            adjusted_messages.push_back(msg);
            continue;
        }
        auto content = json::array();
        // If message contains `reasoning_content`, add it as a block of type `thinking`
        if (msg.contains("reasoning_content") && msg.at("reasoning_content").is_string()) {
            content.push_back({
                {"type", "thinking"},
                {"thinking", msg.at("reasoning_content").get<std::string>()},
            });
        }
        // If message contains `content`, add it as a block of type `text`
        if (msg.contains("content")) {
            if (msg.at("content").is_string()) {
                content.push_back({
                    {"type", "text"},
                    {"text", msg.at("content").get<std::string>()},
                });
            } else if (msg.at("content").is_array()) {
                auto blocks = msg.at("content");
                content.insert(content.end(), blocks.begin(), blocks.end());
            }
        }
        auto adjusted = msg;
        adjusted["content"] = content;
        adjusted.erase("reasoning_content");
        adjusted_messages.push_back(adjusted);
    }

    auto has_tools = inputs.tools.is_array() && !inputs.tools.empty();
    auto extract_reasoning = inputs.reasoning_format != COMMON_REASONING_FORMAT_NONE;
    auto include_grammar = true;

    data.prompt = apply(tmpl, inputs, /* messages_override = */ adjusted_messages);
    data.format = COMMON_CHAT_FORMAT_PEG_NATIVE;
    data.preserved_tokens = {
        "[THINK]",
        "[/THINK]",
        "[TOOL_CALLS]",
        "[ARGS]",
    };

    auto parser = build_chat_peg_native_parser([&](common_chat_peg_native_builder & p) {
        auto reasoning = extract_reasoning ? p.optional("[THINK]" + p.reasoning(p.until("[/THINK]")) + "[/THINK]") : p.eps();

        // Response format parser
        if (inputs.json_schema.is_object() && !inputs.json_schema.empty()) {
            // Ministral wants to emit json surrounded by code fences
            return reasoning << "```json" << p.content(p.schema(p.json(), "response-format", inputs.json_schema)) << "```";
        }

        // Tool call parser
        if (has_tools && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) {
            auto tool_choice = p.choice();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                std::string name = function.at("name");
                const auto & schema = function.at("parameters");
                tool_choice |= p.rule("tool-" + name,
                    p.tool_open(p.tool_name(p.literal(name)) + "[ARGS]")
                    + p.tool_args(p.schema(p.json(), "tool-" + name + "-schema", schema))
                );
            });
            auto min_calls = inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_REQUIRED ? 1 : 0;
            auto max_calls = inputs.parallel_tool_calls ? -1 : 1;
            auto tool_calls = p.trigger_rule("tool-call", p.repeat("[TOOL_CALLS]" + tool_choice, min_calls, max_calls));
            return reasoning << p.content(p.until("[TOOL_CALLS]")) << tool_calls;
        }

        // Content only parser
        include_grammar = false;
        return reasoning << p.content(p.rest());
    });
    data.parser = parser.save();

    if (include_grammar) {
        data.grammar_lazy = has_tools && inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_AUTO;
        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                auto schema = function.at("parameters");
                builder.resolve_refs(schema);
            });
            parser.build_grammar(builder, data.grammar_lazy);
        });
        data.grammar_triggers = {
            {COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"}
        };
    }

    return data;
}
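// Output shapes the parser above accepts (illustrative):
//   [THINK]...reasoning...[/THINK]The answer is 4.
//   [THINK]...reasoning...[/THINK][TOOL_CALLS]get_weather[ARGS]{"city": "Paris"}
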
static common_chat_params common_chat_params_init_magistral(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.prompt = apply(tmpl, inputs);
    data.format = COMMON_CHAT_FORMAT_MAGISTRAL;
    data.preserved_tokens = {
        "[THINK]",
        "[/THINK]",
    };

    if (inputs.tools.is_array() && !inputs.tools.empty()) {
        data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            auto schemas = json::array();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                schemas.push_back({
                    {"type", "object"},
                    {"properties", {
                        {"name", {
                            {"type", "string"},
                            {"const", function.at("name")},
                        }},
                        {"arguments", function.at("parameters")},
                        {"id", {
                            {"type", "string"},
                            {"pattern", "^[a-zA-Z0-9]{9}$"},
                        }},
                    }},
                    {"required", json::array({"name", "arguments", "id"})},
                });
            });
            auto schema = json {
                {"type", "array"},
                {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
                {"minItems", 1},
            };
            if (!inputs.parallel_tool_calls) {
                schema["maxItems"] = 1;
            }
            builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema));
        });
        data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"});
        data.preserved_tokens.push_back("[TOOL_CALLS]");
    } else {
        data.grammar_lazy = false;
        if (!inputs.json_schema.is_null()) {
            if (!inputs.grammar.empty()) {
                throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
            }
            data.grammar = json_schema_to_grammar(inputs.json_schema);
        } else {
            data.grammar = inputs.grammar;
        }
    }
    return data;
}
static common_chat_params common_chat_params_init_command_r7b(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    auto adjusted_messages = json::array();
    for (const auto & msg : inputs.messages) {
        auto has_reasoning_content = msg.contains("reasoning_content") && msg.at("reasoning_content").is_string();
        auto has_tool_calls = msg.contains("tool_calls") && msg.at("tool_calls").is_array();
        if (has_reasoning_content && has_tool_calls) {
            auto adjusted_message = msg;
            adjusted_message["tool_plan"] = msg.at("reasoning_content");
            adjusted_message.erase("reasoning_content");
            adjusted_messages.push_back(adjusted_message);
        } else {
            adjusted_messages.push_back(msg);
        }
    }
    data.prompt = apply(tmpl, inputs, /* messages_override= */ adjusted_messages);
    data.format = COMMON_CHAT_FORMAT_COMMAND_R7B;
    if (string_ends_with(data.prompt, "<|START_THINKING|>")) {
        if (!inputs.enable_thinking) {
            data.prompt += "<|END_THINKING|>";
        } else {
            data.thinking_forced_open = true;
        }
    } else if (!inputs.enable_thinking && string_ends_with(data.prompt, "<|CHATBOT_TOKEN|>")) {
        data.prompt += "<|START_THINKING|><|END_THINKING|>";
    }

    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        auto schemas = json::array();
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            schemas.push_back({
                {"type", "object"},
                {"properties", {
                    {"tool_call_id", {
                        {"type", "string"},
                        // Command-R's template expects an integer string.
                        {"pattern", "^[0-9]{1,10}$"},
                    }},
                    {"tool_name", {
                        {"type", "string"},
                        {"const", function.at("name")},
                    }},
                    {"parameters", function.at("parameters")},
                }},
                {"required", json::array({"tool_call_id", "tool_name", "parameters"})},
            });
        });
        auto schema = json {
            {"type", "array"},
            {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
            {"minItems", 1},
        };
        if (!inputs.parallel_tool_calls) {
            schema["maxItems"] = 1;
        }
        builder.add_rule("root",
            std::string(data.thinking_forced_open ? "( \"<|END_THINKING|>\" space )? " : "") +
            "\"<|START_ACTION|>\" " + builder.add_schema("tool_calls", schema) + " \"<|END_ACTION|>\"");
    });
    data.grammar_triggers.push_back({
        COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
        // If thinking_forced_open, then we capture the </think> tag in the grammar,
        // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
        std::string(data.thinking_forced_open ? "[\\s\\S]*?(<\\|END_THINKING\\|>\\s*)" : "(?:<\\|START_THINKING\\|>[\\s\\S]*?<\\|END_THINKING\\|>\\s*)?") +
        "(<\\|START_ACTION\\|>)[\\s\\S]*"
    });
    data.preserved_tokens = {
        "<|START_ACTION|>",
        "<|END_ACTION|>",
        "<|START_RESPONSE|>",
        "<|END_RESPONSE|>",
        "<|START_THINKING|>",
        "<|END_THINKING|>",
    };
    return data;
}
  1147. static void expect_tool_parameters(const std::string & name, const json & parameters, const std::vector<std::string> & expected_properties) {
  1148. if (!parameters.is_object() || !parameters.contains("type") || parameters.at("type") != "object" || !parameters.contains("properties") || !parameters.contains("required")) {
  1149. throw std::runtime_error("Parameters of tool " + name + " must be an object w/ required properties");
  1150. }
  1151. const auto & parameters_properties = parameters.at("properties");
  1152. const auto & parameters_required = parameters.at("required");
  1153. for (const auto & prop : expected_properties) {
  1154. if (!parameters_properties.contains(prop)) {
  1155. throw std::runtime_error("Parameters of tool " + name + " is missing property: " + prop); // NOLINT
  1156. }
  1157. if (std::find(parameters_required.begin(), parameters_required.end(), json(prop)) == parameters_required.end()) {
  1158. throw std::runtime_error("Parameters of tool " + name + " must have property marked as required: " + prop); // NOLINT
  1159. }
  1160. }
  1161. if (parameters_properties.size() != expected_properties.size()) {
  1162. throw std::runtime_error("Parameters of tool " + name + " must only have these properties:" + string_join(expected_properties, ", "));
  1163. }
  1164. }
  1165. static common_chat_params common_chat_params_init_llama_3_x(const common_chat_template & tmpl, const struct templates_params & inputs, bool allow_python_tag_builtin_tools) {
  1166. auto builtin_tools = json::array();
  1167. common_chat_params data;
  1168. if (!inputs.tools.is_null()) {
  1169. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1170. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1171. std::vector<std::string> tool_rules;
  1172. auto handle_builtin_tool = [&](const std::string & name, const json & parameters) {
  1173. if (name == "wolfram_alpha" || name == "web_search" || name == "brave_search") {
  1174. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py
  1175. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py
  1176. expect_tool_parameters(name, parameters, {"query"});
  1177. } else if (name == "python" || name == "code_interpreter") {
  1178. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py
  1179. expect_tool_parameters(name, parameters, {"code"});
  1180. } else {
  1181. return false;
  1182. }
  1183. std::vector<std::string> kvs;
  1184. for (const auto & [key, value] : parameters.at("properties").items()) {
  1185. kvs.push_back("\"" + key + "=\" " + builder.add_schema(name + "-args-" + key, value)); // NOLINT
  1186. }
  1187. tool_rules.push_back(
  1188. builder.add_rule(
  1189. name + "-call",
  1190. "\"<|python_tag|>" + name + ".call(\" " + string_join(kvs, " \", \" ") + " \")\""));
  1191. builtin_tools.push_back(name);
  1192. return true;
  1193. };
  1194. foreach_function(inputs.tools, [&](const json & tool) {
  1195. const auto & function = tool.at("function");
  1196. std::string name = function.at("name");
  1197. auto parameters = function.at("parameters");
  1198. builder.resolve_refs(parameters);
  1199. // https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/remote/tool_runtime
  1200. if (allow_python_tag_builtin_tools) {
  1201. handle_builtin_tool(name, parameters);
  1202. }
  1203. tool_rules.push_back(
  1204. builder.add_rule(
  1205. name + "-call",
  1206. "\"{\" space "
  1207. "( \"\\\"type\\\"\" space \":\" space \"\\\"function\\\"\" space \",\" space )? "
  1208. " \"\\\"name\\\"\" space \":\" space \"\\\"" + name + "\\\"\" space \",\" space "
  1209. " \"\\\"parameters\\\"\" space \":\" space " + builder.add_schema(name + "-args", parameters) + " "
  1210. "\"}\" space"));
  1211. });
  1212. // Small models may hallucinate function names so we match anything (*at the start*) that looks like the JSON of a function call, regardless of the name.
  1213. data.grammar_triggers.push_back({
  1214. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1215. "(\\{\\s*(?:\"type\"\\s*:\\s*\"function\"\\s*,\\s*)?\"name\"\\s*:\\s*\")[\\s\\S]*", // + name + "\"[\\s\\S]*",
  1216. });
  1217. if (!builtin_tools.empty()) {
  1218. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
  1219. data.preserved_tokens.push_back("<|python_tag|>");
  1220. }
  1221. // Allow a few empty lines on top of the usual constrained json schema space rule.
  1222. builder.add_rule("root", string_join(tool_rules, " | "));
  1223. data.additional_stops.push_back("<|eom_id|>");
  1224. });
  1225. data.format = allow_python_tag_builtin_tools && !builtin_tools.empty()
  1226. ? COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS
  1227. : COMMON_CHAT_FORMAT_LLAMA_3_X;
  1228. } else {
  1229. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  1230. }
  1231. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, /* tools_override= */ std::nullopt, json {
  1232. {"date_string", format_time(inputs.now, "%d %b %Y")},
  1233. {"tools_in_user_message", false},
  1234. {"builtin_tools", builtin_tools.empty() ? json() : builtin_tools},
  1235. });
  1236. return data;
  1237. }
  1238. static common_chat_params common_chat_params_init_nemotron_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1239. common_chat_params data;
  1240. // Generate the prompt using the apply() function with the template
  1241. data.prompt = apply(tmpl, inputs);
  1242. data.format = COMMON_CHAT_FORMAT_NEMOTRON_V2;
  1243. // Handle thinking tags appropriately based on inputs.enable_thinking
  1244. if (string_ends_with(data.prompt, "<think>\n")) {
  1245. if (!inputs.enable_thinking) {
  1246. data.prompt += "</think>";
  1247. } else {
  1248. data.thinking_forced_open = true;
  1249. }
  1250. }
  1251. // When tools are present, build grammar for the <TOOLCALL> format, similar to CommandR, but without tool call ID
  1252. if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
  1253. data.grammar_lazy = true;
  1254. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1255. auto schemas = json::array();
  1256. foreach_function(inputs.tools, [&](const json & tool) {
  1257. const auto & function = tool.at("function");
  1258. schemas.push_back({
  1259. { "type", "object" },
  1260. { "properties",
  1261. {
  1262. { "name",
  1263. {
  1264. { "type", "string" },
  1265. { "const", function.at("name") },
  1266. } },
  1267. { "arguments", function.at("parameters") },
  1268. } },
  1269. { "required", json::array({ "name", "arguments" }) },
  1270. });
  1271. });
  1272. auto schema = json{
  1273. { "type", "array" },
  1274. { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
  1275. { "minItems", 1 },
  1276. };
  1277. if (!inputs.parallel_tool_calls) {
  1278. schema["maxItems"] = 1;
  1279. }
  1280. builder.add_rule("root",
  1281. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1282. "\"<TOOLCALL>\" " + builder.add_schema("tool_calls", schema) +
  1283. " \"</TOOLCALL>\"");
  1284. });
  1285. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1286. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1287. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1288. std::string(data.thinking_forced_open ?
  1289. "[\\s\\S]*?(</think>\\s*)" :
  1290. "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1291. "(<TOOLCALL>)[\\s\\S]*" });
  1292. }
  1293. return data;
  1294. }
  1295. static common_chat_params common_chat_params_init_apertus(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1296. common_chat_params data;
  1297. // Generate the prompt using the apply() function with the template
  1298. data.prompt = apply(tmpl, inputs);
  1299. data.format = COMMON_CHAT_FORMAT_APERTUS;
  1300. // Handle thinking tags appropriately based on inputs.enable_thinking
  1301. if (string_ends_with(data.prompt, "<|inner_prefix|>")) {
  1302. if (!inputs.enable_thinking) {
  1303. data.prompt += "<|inner_suffix|>";
  1304. } else {
  1305. data.thinking_forced_open = true;
  1306. }
  1307. }
  1308. // When tools are present, build grammar for the <|tools_prefix|> format
  1309. if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
  1310. data.grammar_lazy = true;
  1311. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1312. auto schemas = json::array();
  1313. foreach_function(inputs.tools, [&](const json & tool) {
  1314. const auto & function = tool.at("function");
  1315. schemas.push_back({
  1316. { "type", "object" },
  1317. { "properties",
  1318. {
  1319. { function.at("name"), function.at("parameters") }
  1320. } },
  1321. { "required", json::array({ function.at("name") }) },
  1322. });
  1323. });
  1324. auto schema = json{
  1325. { "type", "array" },
  1326. { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
  1327. { "minItems", 1 },
  1328. };
  1329. if (!inputs.parallel_tool_calls) {
  1330. schema["maxItems"] = 1;
  1331. }
  1332. builder.add_rule("root",
  1333. std::string(data.thinking_forced_open ? "( \"<|inner_suffix|>\" space )? " : "") +
  1334. "\"<|tools_prefix|>\"" + builder.add_schema("tool_calls", schema) + "\"<|tools_suffix|>\"");
  1335. });
  1336. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1337. // If thinking_forced_open, then we capture the <|inner_suffix|> tag in the grammar,
  1338. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1339. std::string(data.thinking_forced_open ?
  1340. "[\\s\\S]*?(<\\|inner_suffix\\|>\\s*)" :
  1341. "(?:<\\|inner_prefix\\|>[\\s\\S]*?<\\|inner_suffix\\|>\\s*)?") +
  1342. "(<\\|tools_prefix\\|>)[\\s\\S]*" });
  1343. data.preserved_tokens = {
  1344. "<|system_start|>",
  1345. "<|system_end|>",
  1346. "<|developer_start|>",
  1347. "<|developer_end|>",
  1348. "<|user_start|>",
  1349. "<|user_end|>",
  1350. "<|assistant_start|>",
  1351. "<|assistant_end|>",
  1352. "<|inner_prefix|>",
  1353. "<|inner_suffix|>",
  1354. "<|tools_prefix|>",
  1355. "<|tools_suffix|>",
  1356. };
  1357. }
  1358. return data;
  1359. }
  1360. static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1361. common_chat_params data;
  1362. auto prompt = apply(tmpl, inputs);
  1363. // Hacks to fix the official (broken) prompt.
  1364. // It is advisable to use --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja instead,
  1365. // until the official template is fixed.
  1366. if (tmpl.source().find("{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}") != std::string::npos) {
  1367. // Don't leave the chat dangling after tool results
  1368. if (string_ends_with(prompt, "<|tool▁outputs▁end|>")) {
  1369. prompt += "<|end▁of▁sentence|>";
  1370. if (inputs.add_generation_prompt) {
  1371. prompt += "<|Assistant|>";
  1372. }
  1373. }
  1374. // Fix up tool call delta example added by Minja
  1375. prompt = std::regex_replace(
  1376. prompt,
  1377. std::regex("(<|tool▁call▁end|>)[\\s\\r\\n]*(<|tool▁outputs▁begin|>|<|User|>)"),
  1378. "$1<|tool▁calls▁end|><|end▁of▁sentence|>$2");
  1379. }
  1380. data.prompt = prompt;
  1381. data.format = COMMON_CHAT_FORMAT_DEEPSEEK_R1;
  1382. if (string_ends_with(data.prompt, "<think>\n")) {
  1383. if (!inputs.enable_thinking) {
  1384. data.prompt += "</think>";
  1385. } else {
  1386. data.thinking_forced_open = true;
  1387. }
  1388. }
  1389. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1390. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
  1391. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1392. std::vector<std::string> tool_rules;
  1393. foreach_function(inputs.tools, [&](const json & tool) {
  1394. const auto & function = tool.at("function");
  1395. std::string name = function.at("name");
  1396. auto parameters = function.at("parameters");
  1397. builder.resolve_refs(parameters);
  1398. tool_rules.push_back(builder.add_rule(name + "-call",
  1399. "( \"<|tool▁call▁begin|>\" )? \"function<|tool▁sep|>" + name + "\\n"
  1400. "```json\\n\" " + builder.add_schema(name + "-args", parameters) + " "
  1401. "\"```<|tool▁call▁end|>\""));
  1402. });
  1403. // Distill Qwen 7B & 32B models seem confused re/ syntax of their tool call opening tag,
  1404. // so we accept common variants (then it's all constrained)
  1405. builder.add_rule("root",
  1406. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1407. "( \"<|tool▁calls▁begin|>\" | \"<|tool_calls_begin|>\" | \"<|tool calls begin|>\" | \"<|tool\\\\_calls\\\\_begin|>\" | \"<|tool▁calls|>\" ) "
  1408. "(" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " "
  1409. "\"<|tool▁calls▁end|>\""
  1410. " space");
  1411. data.grammar_triggers.push_back({
  1412. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1413. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1414. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1415. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1416. "(<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>|<|tool▁calls|>)[\\s\\S]*"
  1417. });
  1418. data.preserved_tokens = {
  1419. "<think>",
  1420. "</think>",
  1421. "<|tool▁calls▁begin|>",
  1422. "<|tool▁call▁begin|>",
  1423. "<|tool▁sep|>",
  1424. "<|tool▁call▁end|>",
  1425. "<|tool▁calls▁end|",
  1426. };
  1427. });
  1428. }
  1429. return data;
  1430. }
  1431. static common_chat_params common_chat_params_init_deepseek_v3_1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1432. common_chat_params data;
  1433. // Pass thinking context for DeepSeek V3.1 template
  1434. json additional_context = {
  1435. {"thinking", inputs.enable_thinking},
  1436. };
  1437. auto prompt = apply(tmpl, inputs,
  1438. /* messages_override= */ inputs.messages,
  1439. /* tools_override= */ std::nullopt,
  1440. additional_context);
  1441. data.prompt = prompt;
  1442. data.format = COMMON_CHAT_FORMAT_DEEPSEEK_V3_1;
  1443. if (string_ends_with(data.prompt, "<think>")) {
  1444. if (!inputs.enable_thinking) {
  1445. data.prompt += "</think>";
  1446. } else {
  1447. data.thinking_forced_open = true;
  1448. }
  1449. }
  1450. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1451. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
  1452. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1453. std::vector<std::string> tool_rules;
  1454. foreach_function(inputs.tools, [&](const json & tool) {
  1455. const auto & function = tool.at("function");
  1456. std::string name = function.at("name");
  1457. auto parameters = function.at("parameters");
  1458. builder.resolve_refs(parameters);
  1459. tool_rules.push_back(builder.add_rule(name + "-call",
  1460. "( \"<|tool▁call▁begin|>\" )? \"" + name + "<|tool▁sep|>"
  1461. "\" " + builder.add_schema(name + "-args", parameters) + " "
  1462. "\"<|tool▁call▁end|>\""));
  1463. });
  1464. // Distill Qwen 7B & 32B models seem confused re/ syntax of their tool call opening tag,
  1465. // so we accept common variants (then it's all constrained)
  1466. builder.add_rule("root",
  1467. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1468. "( \"<|tool▁calls▁begin|>\" | \"<|tool_calls_begin|>\" | \"<|tool calls begin|>\" | \"<|tool\\\\_calls\\\\_begin|>\" | \"<|tool▁calls|>\" ) "
  1469. "(" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " "
  1470. "\"<|tool▁calls▁end|>\""
  1471. " space");
  1472. data.grammar_triggers.push_back({
  1473. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1474. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1475. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1476. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1477. "(<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>|<|tool▁calls|>)[\\s\\S]*"
  1478. });
  1479. data.preserved_tokens = {
  1480. "<think>",
  1481. "</think>",
  1482. "<|tool▁calls▁begin|>",
  1483. "<|tool▁call▁begin|>",
  1484. "<|tool▁sep|>",
  1485. "<|tool▁call▁end|>",
  1486. "<|tool▁calls▁end|>",
  1487. };
  1488. });
  1489. }
  1490. return data;
  1491. }
  1492. static common_chat_params common_chat_params_init_minimax_m2(const common_chat_template & tmpl, const struct templates_params & params) {
  1493. common_chat_params data;
  1494. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1495. data.prompt = apply(tmpl, params);
  1496. data.format = COMMON_CHAT_FORMAT_MINIMAX_M2;
  1497. // Handle thinking tags based on prompt ending
  1498. if (string_ends_with(data.prompt, "<think>\n")) {
  1499. if (!params.enable_thinking) {
  1500. // Close the thinking tag immediately if thinking is disabled
  1501. data.prompt += "</think>\n\n";
  1502. } else {
  1503. // Mark thinking as forced open (template started with <think>)
  1504. data.thinking_forced_open = true;
  1505. }
  1506. }
  1507. // Preserve MiniMax-M2 special tokens
  1508. data.preserved_tokens = {
  1509. "<think>",
  1510. "</think>",
  1511. "<minimax:tool_call>",
  1512. "</minimax:tool_call>",
  1513. };
  1514. // build grammar for tool call
  1515. static const xml_tool_call_format form {
  1516. /* form.scope_start = */ "<minimax:tool_call>\n",
  1517. /* form.tool_start = */ "<invoke name=\"",
  1518. /* form.tool_sep = */ "\">\n",
  1519. /* form.key_start = */ "<parameter name=\"",
  1520. /* form.key_val_sep = */ "\">",
  1521. /* form.val_end = */ "</parameter>\n",
  1522. /* form.tool_end = */ "</invoke>\n",
  1523. /* form.scope_end = */ "</minimax:tool_call>",
  1524. };
  1525. build_grammar_xml_tool_call(data, params.tools, form);
  1526. return data;
  1527. }
  1528. static common_chat_params common_chat_params_init_qwen3_coder_xml(const common_chat_template & tmpl, const struct templates_params & params) {
  1529. common_chat_params data;
  1530. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1531. data.prompt = apply(tmpl, params);
  1532. data.format = COMMON_CHAT_FORMAT_QWEN3_CODER_XML;
  1533. data.preserved_tokens = {
  1534. "<tool_call>",
  1535. "</tool_call>",
  1536. "<function=",
  1537. "</function>",
  1538. "<parameter=",
  1539. "</parameter>",
  1540. };
  1541. // build grammar for tool call
  1542. static const xml_tool_call_format form {
  1543. /* form.scope_start = */ "<tool_call>\n",
  1544. /* form.tool_start = */ "<function=",
  1545. /* form.tool_sep = */ ">\n",
  1546. /* form.key_start = */ "<parameter=",
  1547. /* form.key_val_sep = */ ">\n",
  1548. /* form.val_end = */ "\n</parameter>\n",
  1549. /* form.tool_end = */ "</function>\n",
  1550. /* form.scope_end = */ "</tool_call>",
  1551. };
  1552. build_grammar_xml_tool_call(data, params.tools, form);
  1553. return data;
  1554. }
  1555. static common_chat_params common_chat_params_init_kimi_k2(const common_chat_template & tmpl, const struct templates_params & params) {
  1556. common_chat_params data;
  1557. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1558. data.prompt = apply(tmpl, params);
  1559. data.format = COMMON_CHAT_FORMAT_KIMI_K2;
  1560. data.preserved_tokens = {
  1561. "<think>",
  1562. "</think>",
  1563. "<|tool_calls_section_begin|>",
  1564. "<|tool_call_begin|>",
  1565. "<|tool_call_argument_begin|>",
  1566. "<|tool_call_end|>",
  1567. "<|tool_calls_section_end|>",
  1568. "<|im_end|>",
  1569. "<|im_system|>",
  1570. "<|im_middle|>",
  1571. };
  1572. data.additional_stops.insert(data.additional_stops.end(), {
  1573. "<|im_end|>",
  1574. "<|im_middle|>"
  1575. });
  1576. // build grammar for tool call
  1577. static const xml_tool_call_format form = ([]() {
  1578. xml_tool_call_format form {};
  1579. form.scope_start = "<|tool_calls_section_begin|>";
  1580. form.tool_start = "<|tool_call_begin|>";
  1581. form.tool_sep = "<|tool_call_argument_begin|>{";
  1582. form.key_start = "\"";
  1583. form.key_val_sep = "\": ";
  1584. form.val_end = ", ";
  1585. form.tool_end = "}<|tool_call_end|>";
  1586. form.scope_end = "<|tool_calls_section_end|>";
  1587. form.raw_argval = false;
  1588. form.last_val_end = "";
  1589. return form;
  1590. })();
  1591. build_grammar_xml_tool_call(data, params.tools, form);
  1592. return data;
  1593. }
  1594. static common_chat_params common_chat_params_init_apriel_1_5(const common_chat_template & tmpl, const struct templates_params & params) {
  1595. common_chat_params data;
  1596. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1597. data.prompt = apply(tmpl, params);
  1598. data.format = COMMON_CHAT_FORMAT_APRIEL_1_5;
  1599. data.preserved_tokens = {
  1600. "<thinking>",
  1601. "</thinking>",
  1602. "<tool_calls>",
  1603. "</tool_calls>",
  1604. };
  1605. // build grammar for tool call
  1606. static const xml_tool_call_format form = ([]() {
  1607. xml_tool_call_format form {};
  1608. form.scope_start = "<tool_calls>[";
  1609. form.tool_start = "{\"name\": \"";
  1610. form.tool_sep = "\", \"arguments\": {";
  1611. form.key_start = "\"";
  1612. form.key_val_sep = "\": ";
  1613. form.val_end = ", ";
  1614. form.tool_end = "}, ";
  1615. form.scope_end = "]</tool_calls>";
  1616. form.raw_argval = false;
  1617. form.last_val_end = "";
  1618. form.last_tool_end = "}";
  1619. return form;
  1620. })();
  1621. build_grammar_xml_tool_call(data, params.tools, form);
  1622. return data;
  1623. }
  1624. static common_chat_params common_chat_params_init_xiaomi_mimo(const common_chat_template & tmpl, const struct templates_params & params) {
  1625. common_chat_params data;
  1626. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1627. data.prompt = apply(tmpl, params);
  1628. data.format = COMMON_CHAT_FORMAT_XIAOMI_MIMO;
  1629. data.preserved_tokens = {
  1630. "<tool_call>",
  1631. "</tool_call>",
  1632. };
  1633. // build grammar for tool call
  1634. static const xml_tool_call_format form = ([]() {
  1635. xml_tool_call_format form {};
  1636. form.scope_start = "\n";
  1637. form.tool_start = "<tool_call>\n{\"name\": \"";
  1638. form.tool_sep = "\", \"arguments\": {";
  1639. form.key_start = "\"";
  1640. form.key_val_sep = "\": ";
  1641. form.val_end = ", ";
  1642. form.tool_end = "}\n</tool_call>";
  1643. form.scope_end = "";
  1644. form.raw_argval = false;
  1645. form.last_val_end = "";
  1646. return form;
  1647. })();
  1648. build_grammar_xml_tool_call(data, params.tools, form);
  1649. return data;
  1650. }
  1651. static common_chat_params common_chat_params_init_gpt_oss(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1652. common_chat_params data;
  1653. // Copy reasoning to the "thinking" field as expected by the gpt-oss template
  1654. auto adjusted_messages = json::array();
  1655. for (const auto & msg : inputs.messages) {
  1656. auto has_reasoning_content = msg.contains("reasoning_content") && msg.at("reasoning_content").is_string();
  1657. auto has_tool_calls = msg.contains("tool_calls") && msg.at("tool_calls").is_array();
  1658. if (has_reasoning_content && has_tool_calls) {
  1659. auto adjusted_message = msg;
  1660. adjusted_message["thinking"] = msg.at("reasoning_content");
  1661. adjusted_messages.push_back(adjusted_message);
  1662. } else {
  1663. adjusted_messages.push_back(msg);
  1664. }
  1665. }
  1666. auto prompt = apply(tmpl, inputs, /* messages_override= */ adjusted_messages);
  1667. // Check if we need to replace the return token with end token during
  1668. // inference and without generation prompt. For more details see:
  1669. // https://github.com/ggml-org/llama.cpp/issues/15417
  1670. if (inputs.is_inference && !inputs.add_generation_prompt) {
  1671. static constexpr std::string_view return_token = "<|return|>";
  1672. static constexpr std::string_view end_token = "<|end|>";
  1673. if (size_t pos = prompt.rfind(return_token); pos != std::string::npos) {
  1674. prompt.replace(pos, return_token.length(), end_token);
  1675. }
  1676. }
  1677. data.prompt = prompt;
  1678. data.format = COMMON_CHAT_FORMAT_GPT_OSS;
  1679. // These special tokens are required to parse properly, so we include them
  1680. // even if parse_tool_calls is false.
  1681. data.preserved_tokens = {
  1682. "<|channel|>",
  1683. "<|constrain|>",
  1684. "<|message|>",
  1685. "<|start|>",
  1686. "<|end|>",
  1687. };
  1688. if (!inputs.json_schema.is_null()) {
  1689. data.grammar_lazy = false;
  1690. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1691. auto schema = inputs.json_schema;
  1692. builder.resolve_refs(schema);
  1693. auto not_end = builder.add_rule("not-end",
  1694. "[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
  1695. auto analysis = builder.add_rule("analysis",
  1696. "\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1697. auto constraint = builder.add_rule("constraint", "\"<|constrain|>\"? [a-zA-Z0-9_-]+");
  1698. auto final = builder.add_rule("final",
  1699. "\"<|channel|>final\" ( \" \" " + constraint + " )? \"<|message|>\" " +
  1700. builder.add_schema("response", schema)
  1701. );
  1702. builder.add_rule("root", "( " + analysis + " \"<|start|>assistant\" )? " + final);
  1703. });
  1704. }
  1705. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1706. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1707. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1708. // tool calls can appear in commentary or analysis channels
  1709. auto channel = builder.add_rule("channel", "\"<|channel|>\" ( \"commentary\" | \"analysis\" )");
  1710. std::vector<std::string> tool_rules_recipient_in_role;
  1711. std::vector<std::string> tool_rules_recipient_in_channel;
  1712. foreach_function(inputs.tools, [&](const json & tool) {
  1713. const auto & function = tool.at("function");
  1714. std::string name = function.at("name");
  1715. auto parameters = function.at("parameters");
  1716. builder.resolve_refs(parameters);
  1717. tool_rules_recipient_in_role.push_back(
  1718. builder.add_rule(name + "-call",
  1719. "\"" + name + "\"" + channel + " \" <|constrain|>json\"? \"<|message|>\" " +
  1720. builder.add_schema(name + "-args", parameters)
  1721. )
  1722. );
  1723. tool_rules_recipient_in_channel.push_back(
  1724. builder.add_rule(name + "-call",
  1725. "\"" + name + "\"" + " \" <|constrain|>json\"? \"<|message|>\" " +
  1726. builder.add_schema(name + "-args", parameters)
  1727. )
  1728. );
  1729. });
  1730. auto recipient_in_channel = builder.add_rule("recipient_in_channel",
  1731. channel + " \" to=functions.\" ( " +
  1732. string_join(tool_rules_recipient_in_channel, " | ") + " )"
  1733. );
  1734. if (data.grammar_lazy) {
  1735. auto recipient_in_role = builder.add_rule("recipient_in_role",
  1736. "\"<|start|>assistant\"? \" to=functions.\" ( " +
  1737. string_join(tool_rules_recipient_in_role, " | ") + " )"
  1738. );
  1739. builder.add_rule("root", recipient_in_role + " | " + recipient_in_channel);
  1740. } else {
  1741. auto not_end = builder.add_rule("not-end",
  1742. "[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
  1743. auto analysis = builder.add_rule("analysis",
  1744. "\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1745. auto commentary = builder.add_rule("commentary",
  1746. "\"<|channel|>commentary<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1747. auto recipient_in_role = builder.add_rule("recipient_in_role",
  1748. "\" to=functions.\" ( " + string_join(tool_rules_recipient_in_role, " | ") + " )"
  1749. );
  1750. builder.add_rule("root",
  1751. "( " + analysis + " \"<|start|>assistant\" )? " +
  1752. "( " + commentary + " \"<|start|>assistant\" )? " +
  1753. "( " + recipient_in_role + " | " + recipient_in_channel + " )"
  1754. );
  1755. }
  1756. // Trigger on tool calls that appear in the commentary channel
  1757. data.grammar_triggers.push_back({
  1758. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  1759. "<\\|channel\\|>(commentary|analysis) to"
  1760. });
  1761. // Trigger tool calls that appear in the role section, either at the
  1762. // start or in the middle.
  1763. data.grammar_triggers.push_back({
  1764. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1765. "^ to"
  1766. });
  1767. data.grammar_triggers.push_back({
  1768. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  1769. "<\\|start\\|>assistant to"
  1770. });
  1771. });
  1772. }
  1773. return data;
  1774. }
  1775. static common_chat_params common_chat_params_init_glm_4_5(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1776. common_chat_params data;
  1777. data.grammar_lazy = inputs.tools.is_array() && !inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1778. std::string prompt = apply(tmpl, inputs);
  1779. // match the existing trimming behavior
  1780. if (inputs.add_bos && string_starts_with(prompt, tmpl.bos_token())) {
  1781. prompt.erase(0, tmpl.bos_token().size());
  1782. }
  1783. if (inputs.add_eos && string_ends_with(prompt, tmpl.eos_token())) {
  1784. prompt.erase(prompt.size() - tmpl.eos_token().size());
  1785. }
  1786. if (string_ends_with(prompt, "<think>")) {
  1787. if (!inputs.enable_thinking) {
  1788. prompt += "</think>";
  1789. } else {
  1790. data.thinking_forced_open = true;
  1791. }
  1792. }
  1793. // add GLM preserved tokens
  1794. data.preserved_tokens = {
  1795. "<|endoftext|>",
  1796. "[MASK]",
  1797. "[gMASK]",
  1798. "[sMASK]",
  1799. "<sop>",
  1800. "<eop>",
  1801. "<|system|>",
  1802. "<|user|>",
  1803. "<|assistant|>",
  1804. "<|observation|>",
  1805. "<|begin_of_image|>",
  1806. "<|end_of_image|>",
  1807. "<|begin_of_video|>",
  1808. "<|end_of_video|>",
  1809. "<|begin_of_audio|>",
  1810. "<|end_of_audio|>",
  1811. "<|begin_of_transcription|>",
  1812. "<|end_of_transcription|>",
  1813. "<|code_prefix|>",
  1814. "<|code_middle|>",
  1815. "<|code_suffix|>",
  1816. "/nothink",
  1817. "<think>",
  1818. "</think>",
  1819. "<tool_call>",
  1820. "</tool_call>",
  1821. "<arg_key>",
  1822. "</arg_key>",
  1823. "<arg_value>",
  1824. "</arg_value>"
  1825. };
  1826. // extra GLM 4.5 stop word
  1827. data.additional_stops.insert(data.additional_stops.end(), {
  1828. "<|user|>",
  1829. "<|observation|>"
  1830. });
  1831. // build grammar for tool call
  1832. static const xml_tool_call_format form {
  1833. /* form.scope_start = */ "",
  1834. /* form.tool_start = */ "\n<tool_call>",
  1835. /* form.tool_sep = */ "\n",
  1836. /* form.key_start = */ "<arg_key>",
  1837. /* form.key_val_sep = */ "</arg_key>\n<arg_value>",
  1838. /* form.val_end = */ "</arg_value>\n",
  1839. /* form.tool_end = */ "</tool_call>\n",
  1840. /* form.scope_end = */ "",
  1841. };
  1842. build_grammar_xml_tool_call(data, inputs.tools, form);
  1843. data.prompt = prompt;
  1844. data.format = COMMON_CHAT_FORMAT_GLM_4_5;
  1845. return data;
  1846. }
  1847. static common_chat_params common_chat_params_init_firefunction_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1848. LOG_DBG("%s\n", __func__);
  1849. common_chat_params data;
  1850. const std::optional<json> tools_override = json();
  1851. const std::optional<json> additional_context = json {
  1852. {"datetime", format_time(inputs.now, "%b %d %Y %H:%M:%S GMT")},
  1853. {"functions", json(inputs.tools.empty() ? "" : inputs.tools.dump(2))},
  1854. };
  1855. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, tools_override, additional_context);
  1856. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1857. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1858. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1859. auto schemas = json::array();
  1860. foreach_function(inputs.tools, [&](const json & tool) {
  1861. const auto & function = tool.at("function");
  1862. schemas.push_back({
  1863. {"type", "object"},
  1864. {"properties", {
  1865. {"name", {
  1866. {"type", "string"},
  1867. {"const", function.at("name")},
  1868. }},
  1869. {"arguments", function.at("parameters")},
  1870. }},
  1871. {"required", json::array({"name", "arguments", "id"})},
  1872. });
  1873. });
  1874. auto schema = json {
  1875. {"type", "array"},
  1876. {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
  1877. {"minItems", 1},
  1878. };
  1879. if (!inputs.parallel_tool_calls) {
  1880. schema["maxItems"] = 1;
  1881. }
  1882. builder.add_rule("root", "\" functools\"? " + builder.add_schema("tool_calls", schema));
  1883. });
  1884. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, " functools["});
  1885. data.preserved_tokens = {
  1886. " functools[",
  1887. };
  1888. data.format = COMMON_CHAT_FORMAT_FIREFUNCTION_V2;
  1889. } else {
  1890. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  1891. }
  1892. return data;
  1893. }
  1894. static common_chat_params common_chat_params_init_functionary_v3_2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1895. // >>>all\nlet's call functions>>>fn1\n{"arg1": 1...}\n>>>fn2\n{"arg1": 1...}...
  1896. // Using ">>>f1\n", ">>>f2\n"... as trigger words for the grammar
  1897. // If the function is python, we also allow raw python code (if the line after `python\n` doesn't start w/ opening `{`), which the model seems to prefer for multiline code.
  1898. common_chat_params data;
  1899. data.prompt = apply(tmpl, inputs);
  1900. data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2;
  1901. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1902. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1903. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1904. std::vector<std::string> first_tool_rules;
  1905. std::vector<std::string> subsequent_tool_rules;
  1906. foreach_function(inputs.tools, [&](const json & tool) {
  1907. const auto & function = tool.at("function");
  1908. std::string name = function.at("name");
  1909. auto parameters = function.at("parameters");
  1910. builder.resolve_refs(parameters);
  1911. std::string args_pattern = "[\\s\\S]*";
  1912. auto args_rule = builder.add_schema(name + "-args", parameters);
  1913. if (name == "python") {
  1914. args_rule = builder.add_rule(name + "-maybe-raw-args", args_rule + " | [^{] .*");
  1915. } else {
  1916. args_pattern = "\\{" + args_pattern;
  1917. }
  1918. auto call_rule = builder.add_rule(name + "-call", "\"" + name + "\\n\" " + args_rule);
  1919. first_tool_rules.push_back(call_rule);
  1920. if (inputs.parallel_tool_calls) {
  1921. subsequent_tool_rules.push_back(builder.add_rule(name + "-call2", "\">>>\" " + call_rule));
  1922. }
  1923. data.grammar_triggers.push_back({
  1924. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1925. "((?:[\\s\\S]+?>>>)?" + regex_escape(name) + "\n)" + args_pattern,
  1926. });
  1927. });
  1928. data.preserved_tokens = {
  1929. "<|end_header_id|>",
  1930. };
  1931. auto first_rule = first_tool_rules.empty() ? "" : builder.add_rule("first_tool_call", string_join(first_tool_rules, " | ")) + " space";
  1932. if (inputs.parallel_tool_calls) {
  1933. auto subsequent_rule = builder.add_rule("subsequent_tool_call", string_join(subsequent_tool_rules, " | ")) + " space";
  1934. builder.add_rule("root", first_rule + " (" + subsequent_rule + ")*");
  1935. } else {
  1936. builder.add_rule("root", first_rule);
  1937. }
  1938. });
  1939. }
  1940. return data;
  1941. }
  1942. static common_chat_params common_chat_params_init_functionary_v3_1_llama_3_1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1943. // https://github.com/MeetKai/functionary/blob/main/tests/prompt_test_v3-llama3.1.txt
  1944. common_chat_params data;
  1945. if (!inputs.tools.is_null()) {
  1946. std::string python_code_argument_name;
  1947. auto has_raw_python = false;
  1948. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1949. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1950. std::vector<std::string> tool_rules;
  1951. foreach_function(inputs.tools, [&](const json & tool) {
  1952. const auto & function = tool.at("function");
  1953. const auto & parameters = function.at("parameters");
  1954. std::string name = function.at("name");
  1955. if (name == "python" || name == "ipython") {
  1956. if (!parameters.contains("type")) {
  1957. throw std::runtime_error("Missing type in python tool");
  1958. }
  1959. has_raw_python = true;
  1960. const auto & type = parameters.at("type");
  1961. if (type == "object") {
  1962. auto properties = parameters.at("properties");
  1963. for (auto it = properties.begin(); it != properties.end(); ++it) {
  1964. if (it.value().at("type") == "string") {
  1965. if (!python_code_argument_name.empty()) {
  1966. throw std::runtime_error("Multiple string arguments found in python tool");
  1967. }
  1968. python_code_argument_name = it.key();
  1969. }
  1970. }
  1971. if (python_code_argument_name.empty()) {
  1972. throw std::runtime_error("No string argument found in python tool");
  1973. }
  1974. } else if (type != "string") {
  1975. throw std::runtime_error("Invalid type in python tool: " + type.dump());
  1976. }
  1977. }
  1978. tool_rules.push_back(builder.add_rule(name + "-call", "\"<function=" + name + ">\" " + builder.add_schema(name + "-args", parameters) + " \"</function>\" space"));
  1979. });
  1980. if (has_raw_python) {
  1981. tool_rules.push_back(builder.add_rule("python-call", "\"<|python_tag|>\" .*"));
  1982. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
  1983. data.preserved_tokens.push_back("<|python_tag|>");
  1984. }
  1985. auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | ")) + " space";
  1986. builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call);
  1987. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<function="});
  1988. });
  1989. data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1;
  1990. } else {
  1991. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  1992. }
  1993. data.prompt = apply(tmpl, inputs);
  1994. // TODO: if (has_raw_python)
  1995. return data;
  1996. }
  1997. static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1998. common_chat_params data;
  1999. json extra_context = json {
  2000. {"enable_thinking", inputs.enable_thinking},
  2001. };
  2002. extra_context.update(inputs.extra_context);
  2003. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, /* tools_override= */ std::nullopt, extra_context);
  2004. data.format = COMMON_CHAT_FORMAT_HERMES_2_PRO;
  2005. if (string_ends_with(data.prompt, "<think>\n")) {
  2006. if (!extra_context["enable_thinking"]) {
  2007. data.prompt += "</think>";
  2008. } else {
  2009. data.thinking_forced_open = true;
  2010. }
  2011. }
  2012. if (!inputs.tools.is_null()) {
  2013. // (content)?(<tool_call>{"name": "foo", "arguments": {"a": 1}}</tool_call>)*
  2014. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2015. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2016. std::vector<std::string> tool_rules;
  2017. std::vector<std::string> tool_call_alts;
  2018. std::vector<std::string> escaped_names;
  2019. foreach_function(inputs.tools, [&](const json & tool) {
  2020. const auto & function = tool.at("function");
  2021. std::string name = function.at("name");
  2022. auto parameters = function.at("parameters");
  2023. builder.resolve_refs(parameters);
  2024. tool_rules.push_back(builder.add_schema(name + "-call", {
  2025. {"type", "object"},
  2026. {"properties", json {
  2027. {"name", json {{"const", name}}},
  2028. {"arguments", parameters},
  2029. }},
  2030. {"required", json::array({"name", "arguments"})},
  2031. }));
  2032. tool_call_alts.push_back(builder.add_rule(
  2033. name + "-function-tag",
  2034. "\"<function\" ( \"=" + name + "\" | \" name=\\\"" + name + "\\\"\" ) \">\" space " +
  2035. builder.add_schema(name + "-args", parameters) + " "
  2036. "\"</function>\" space"));
  2037. data.grammar_triggers.push_back({
  2038. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  2039. "<function=" + name + ">",
  2040. });
  2041. auto escaped_name = regex_escape(name);
  2042. data.grammar_triggers.push_back({
  2043. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  2044. "<function\\s+name\\s*=\\s*\"" + escaped_name + "\"",
  2045. });
  2046. escaped_names.push_back(escaped_name);
  2047. });
  2048. auto any_tool_call = builder.add_rule("any_tool_call", "( " + string_join(tool_rules, " | ") + " ) space");
  2049. std::vector<std::string> alt_tags {
  2050. any_tool_call,
  2051. "\"<tool_call>\" space " + any_tool_call + " \"</tool_call>\"",
  2052. // The rest is just to accommodate common "good bad" outputs.
  2053. "\"<function_call>\" space " + any_tool_call + " \"</function_call>\"",
  2054. "\"<response>\" space " + any_tool_call + " \"</response>\"",
  2055. "\"<tools>\" space " + any_tool_call + " \"</tools>\"",
  2056. "\"<json>\" space " + any_tool_call + " \"</json>\"",
  2057. "\"<xml>\" space " + any_tool_call + " \"</xml>\"",
  2058. "\"<JSON>\" space " + any_tool_call + " \"</JSON>\"",
  2059. };
  2060. auto wrappable_tool_call = builder.add_rule("wrappable_tool_call", "( " + string_join(alt_tags, " | ") + " ) space");
  2061. tool_call_alts.push_back(wrappable_tool_call);
  2062. tool_call_alts.push_back(
  2063. "( \"```\\n\" | \"```json\\n\" | \"```xml\\n\" ) space " + wrappable_tool_call + " space \"```\" space ");
  2064. auto tool_call = builder.add_rule("tool_call", string_join(tool_call_alts, " | "));
  2065. builder.add_rule("root",
  2066. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  2067. (inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call));
  2068. // Trigger on some common known "good bad" outputs (only from the start and with a json that's about a specific argument name to avoid false positives)
  2069. data.grammar_triggers.push_back({
  2070. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  2071. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  2072. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  2073. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") + (
  2074. "\\s*("
  2075. "(?:<tool_call>"
  2076. "|<function"
  2077. "|(?:```(?:json|xml)?\n\\s*)?(?:<function_call>|<tools>|<xml><json>|<response>)?"
  2078. "\\s*\\{\\s*\"name\"\\s*:\\s*\"(?:" + string_join(escaped_names, "|") + ")\""
  2079. ")"
  2080. ")[\\s\\S]*"
  2081. ),
  2082. });
  2083. data.preserved_tokens = {
  2084. "<think>",
  2085. "</think>",
  2086. "<tool_call>",
  2087. "</tool_call>",
  2088. "<function",
  2089. "<tools>",
  2090. "</tools>",
  2091. "<response>",
  2092. "</response>",
  2093. "<function_call>",
  2094. "</function_call>",
  2095. "<json>",
  2096. "</json>",
  2097. "<JSON>",
  2098. "</JSON>",
  2099. "```",
  2100. "```json",
  2101. "```xml",
  2102. };
  2103. });
  2104. }
  2105. return data;
  2106. }
  2107. static common_chat_params common_chat_params_init_granite(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2108. common_chat_params data;
  2109. // Pass thinking context for Granite template
  2110. json additional_context = {
  2111. {"thinking", inputs.enable_thinking},
  2112. };
  2113. data.prompt = apply(tmpl, inputs, /* messages_override= */ std::nullopt, /* tools_override= */ std::nullopt, additional_context);
  2114. data.format = COMMON_CHAT_FORMAT_GRANITE;
  2115. if (string_ends_with(data.prompt, "<think>\n") || string_ends_with(data.prompt, "<think>")) {
  2116. if (!inputs.enable_thinking) {
  2117. data.prompt += "</think>";
  2118. } else {
  2119. data.thinking_forced_open = true;
  2120. }
  2121. }
  2122. if (!inputs.tools.is_null()) {
  2123. // Granite uses <|tool_call|> followed by JSON list
  2124. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2125. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2126. std::vector<std::string> tool_rules;
  2127. foreach_function(inputs.tools, [&](const json & tool) {
  2128. const auto & function = tool.at("function");
  2129. std::string name = function.at("name");
  2130. auto parameters = function.at("parameters");
  2131. builder.resolve_refs(parameters);
  2132. tool_rules.push_back(builder.add_rule(name + "-call", builder.add_schema(name +
  2133. "-args", {
  2134. {"type", "object"},
  2135. {"properties", {
  2136. {"name", {{"const", name}}},
  2137. {"arguments", parameters},
  2138. }},
  2139. {"required", json::array({"name", "arguments"})},
  2140. })));
  2141. });
  2142. auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | "));
  2143. auto tool_list = builder.add_rule("tool_list", "\"[\" space " + tool_call + " (\",\" space " + tool_call + ")* space \"]\"");
  2144. if (data.thinking_forced_open) {
  2145. builder.add_rule("root", "\"</think>\" space \"<response>\" space [^<]* \"</response>\" space \"<|tool_call|>\" space " + tool_list);
  2146. } else {
  2147. builder.add_rule("root", "\"<|tool_call|>\" space " + tool_list);
  2148. }
  2149. data.grammar_triggers.push_back({
  2150. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  2151. "<|tool_call|>"
  2152. });
  2153. data.preserved_tokens = {
  2154. "<think>",
  2155. "</think>",
  2156. "<response>",
  2157. "</response>",
  2158. "<|tool_call|>",
  2159. };
  2160. });
  2161. } else {
  2162. // Handle thinking tags for non-tool responses
  2163. if (data.thinking_forced_open && inputs.enable_thinking) {
  2164. data.grammar_lazy = false;
  2165. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2166. builder.add_rule("root", "\"</think>\" space \"<response>\" space .* \"</response>\" space");
  2167. });
  2168. data.preserved_tokens = {
  2169. "<think>",
  2170. "</think>",
  2171. "<response>",
  2172. "</response>",
  2173. };
  2174. }
  2175. }
  2176. return data;
  2177. }
  2178. static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2179. common_chat_params data;
  2180. data.prompt = apply(tmpl, inputs);
  2181. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  2182. data.grammar_lazy = false;
  2183. if (!inputs.json_schema.is_null()) {
  2184. if (!inputs.grammar.empty()) {
  2185. throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
  2186. }
  2187. data.grammar = json_schema_to_grammar(inputs.json_schema);
  2188. } else {
  2189. data.grammar = inputs.grammar;
  2190. }
  2191. return data;
  2192. }
  2193. static common_chat_params common_chat_params_init_seed_oss(
  2194. const common_chat_template & tmpl,
  2195. templates_params & params,
  2196. const common_chat_templates_inputs & inputs)
  2197. {
  2198. common_chat_params data;
  2199. data.prompt = apply(tmpl, params);
  2200. data.format = COMMON_CHAT_FORMAT_SEED_OSS;
  2201. if (string_ends_with(data.prompt, "<seed:think>")) {
  2202. if (!inputs.enable_thinking) {
  2203. data.prompt += "</seed:think>";
  2204. } else {
  2205. data.thinking_forced_open = true;
  2206. }
  2207. }
  2208. if (params.tools.is_array() && !params.tools.empty()) {
  2209. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2210. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2211. std::vector<std::string> tool_rules;
  2212. foreach_function(params.tools, [&](const json & tool) {
  2213. const auto & function = tool.at("function");
  2214. std::string name = function.at("name");
  2215. auto parameters = function.at("parameters");
  2216. builder.resolve_refs(parameters);
  2217. // Create rule for Seed-OSS function call format
  2218. std::string param_rules;
  2219. if (parameters.contains("properties")) {
  2220. for (const auto & [key, value] : parameters.at("properties").items()) {
  2221. param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
  2222. "\"</parameter>\"";
  2223. }
  2224. }
  2225. tool_rules.push_back(builder.add_rule(name + "-call",
  2226. "\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
  2227. param_rules +
  2228. " \"</function>\" space \"</seed:tool_call>\""));
  2229. });
  2230. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
  2231. data.preserved_tokens = {
  2232. "<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
  2233. "<function=", "</function>", "<parameter=", "</parameter>",
  2234. };
  2235. builder.add_rule("root", string_join(tool_rules, " | "));
  2236. });
  2237. }
  2238. return data;
  2239. }
  2240. static common_chat_params common_chat_templates_apply_jinja(
  2241. const struct common_chat_templates * tmpls,
  2242. const struct common_chat_templates_inputs & inputs)
  2243. {
  2244. templates_params params;
  2245. params.tools = common_chat_tools_to_json_oaicompat<json>(inputs.tools);
  2246. const auto & tmpl = params.tools.is_array() && tmpls->template_tool_use
  2247. ? *tmpls->template_tool_use
  2248. : *tmpls->template_default;
  2249. const auto & src = tmpl.source();
  2250. const auto & caps = tmpl.original_caps();
  2251. params.messages = common_chat_msgs_to_json_oaicompat<json>(inputs.messages, /* concat_text= */ !tmpl.original_caps().requires_typed_content);
  2252. params.add_generation_prompt = inputs.add_generation_prompt;
  2253. params.tool_choice = inputs.tool_choice;
  2254. params.reasoning_format = inputs.reasoning_format;
  2255. params.enable_thinking = inputs.enable_thinking;
  2256. params.grammar = inputs.grammar;
  2257. params.now = inputs.now;
  2258. params.add_bos = tmpls->add_bos;
  2259. params.add_eos = tmpls->add_eos;
  2260. params.extra_context = json::object();
  2261. for (auto el : inputs.chat_template_kwargs) {
  2262. params.extra_context[el.first] = json::parse(el.second);
  2263. }
  2264. if (!inputs.json_schema.empty()) {
  2265. params.json_schema = json::parse(inputs.json_schema);
  2266. }
  2267. if (inputs.parallel_tool_calls && !tmpl.original_caps().supports_parallel_tool_calls) {
  2268. LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n");
  2269. params.parallel_tool_calls = false;
  2270. } else {
  2271. params.parallel_tool_calls = inputs.parallel_tool_calls;
  2272. }
  2273. if (params.tools.is_array()) {
  2274. if (params.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && !params.grammar.empty()) {
  2275. throw std::runtime_error("Cannot specify grammar with tools");
  2276. }
  2277. if (caps.supports_tool_calls && !caps.supports_tools) {
  2278. LOG_WRN("Template supports tool calls but does not natively describe tools. The fallback behaviour used may produce bad results, inspect prompt w/ --verbose & consider overriding the template.\n");
  2279. }
  2280. }
  2281. // DeepSeek V3.1: detect based on specific patterns in the template
  2282. if (src.find("message['prefix'] is defined and message['prefix'] and thinking") != std::string::npos &&
  2283. params.json_schema.is_null()) {
  2284. return common_chat_params_init_deepseek_v3_1(tmpl, params);
  2285. }
  2286. // DeepSeek R1: use handler in all cases except json schema (thinking / tools).
  2287. if (src.find("<|tool▁calls▁begin|>") != std::string::npos && params.json_schema.is_null()) {
  2288. return common_chat_params_init_deepseek_r1(tmpl, params);
  2289. }
  2290. // Command R7B: : use handler in all cases except json schema (thinking / tools).
  2291. if (src.find("<|END_THINKING|><|START_ACTION|>") != std::string::npos && params.json_schema.is_null()) {
  2292. return common_chat_params_init_command_r7b(tmpl, params);
  2293. }
  2294. // Granite (IBM) - detects thinking / tools support
  2295. if (src.find("elif thinking") != std::string::npos && src.find("<|tool_call|>") != std::string::npos) {
  2296. return common_chat_params_init_granite(tmpl, params);
  2297. }
  2298. // GLM 4.5: detect by <arg_key> and <arg_value> tags (check before Hermes since both use <tool_call>)
  2299. if (src.find("[gMASK]<sop>") != std::string::npos &&
  2300. src.find("<arg_key>") != std::string::npos &&
  2301. src.find("<arg_value>") != std::string::npos &&
  2302. params.json_schema.is_null()) {
  2303. return common_chat_params_init_glm_4_5(tmpl, params);
  2304. }
  2305. // Qwen3-Coder XML format detection (must come before Hermes 2 Pro)
  2306. // Detect via explicit XML markers unique to Qwen3-Coder to avoid false positives in other templates.
  2307. // Require presence of <tool_call>, <function=...>, and <parameter=...> blocks.
  2308. if (src.find("<tool_call>") != std::string::npos &&
  2309. src.find("<function>") != std::string::npos &&
  2310. src.find("<function=") != std::string::npos &&
  2311. src.find("<parameters>") != std::string::npos &&
  2312. src.find("<parameter=") != std::string::npos) {
  2313. return common_chat_params_init_qwen3_coder_xml(tmpl, params);
  2314. }
  2315. // Xiaomi MiMo format detection (must come before Hermes 2 Pro)
  2316. if (src.find("<tools>") != std::string::npos &&
  2317. src.find("# Tools") != std::string::npos &&
  2318. src.find("</tools>") != std::string::npos &&
  2319. src.find("<tool_calls>") != std::string::npos &&
  2320. src.find("</tool_calls>") != std::string::npos &&
  2321. src.find("<tool_response>") != std::string::npos) {
  2322. return common_chat_params_init_xiaomi_mimo(tmpl, params);
  2323. }
  2324. // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
  2325. if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) {
  2326. return common_chat_params_init_hermes_2_pro(tmpl, params);
  2327. }
  2328. // GPT-OSS
  2329. if (src.find("<|channel|>") != std::string::npos) {
  2330. return common_chat_params_init_gpt_oss(tmpl, params);
  2331. }
  2332. // Seed-OSS
  2333. if (src.find("<seed:think>") != std::string::npos) {
  2334. return common_chat_params_init_seed_oss(tmpl, params, inputs);
  2335. }
  2336. // Nemotron v2
  2337. if (src.find("<SPECIAL_10>") != std::string::npos) {
  2338. return common_chat_params_init_nemotron_v2(tmpl, params);
  2339. }
  2340. // Apertus format detection
  2341. if (src.find("<|system_start|>") != std::string::npos && src.find("<|tools_prefix|>") != std::string::npos) {
  2342. return common_chat_params_init_apertus(tmpl, params);
  2343. }
  2344. // LFM2 (w/ tools)
  2345. if (src.find("List of tools: <|tool_list_start|>[") != std::string::npos &&
  2346. src.find("]<|tool_list_end|>") != std::string::npos) {
  2347. return common_chat_params_init_lfm2(tmpl, params);
  2348. }
  2349. // MiniMax-M2 format detection
  2350. if (src.find("]~!b[") != std::string::npos && src.find("]~b]") != std::string::npos) {
  2351. return common_chat_params_init_minimax_m2(tmpl, params);
  2352. }
  2353. // Kimi K2 format detection
  2354. if (src.find("<|im_system|>tool_declare<|im_middle|>") != std::string::npos &&
  2355. src.find("<|tool_calls_section_begin|>") != std::string::npos &&
  2356. src.find("## Return of") != std::string::npos) {
  2357. return common_chat_params_init_kimi_k2(tmpl, params);
  2358. }
  2359. // Apriel 1.5 format detection
  2360. if (src.find("<thinking>") != std::string::npos &&
  2361. src.find("</thinking>") != std::string::npos &&
  2362. src.find("<available_tools>") != std::string::npos &&
  2363. src.find("<|assistant|>") != std::string::npos &&
  2364. src.find("<|tool_result|>") != std::string::npos &&
  2365. src.find("<tool_calls>[") != std::string::npos &&
  2366. src.find("]</tool_calls>") != std::string::npos) {
  2367. return common_chat_params_init_apriel_1_5(tmpl, params);
  2368. }
  2369. // Use generic handler when mixing tools + JSON schema.
  2370. // TODO: support that mix in handlers below.
  2371. if ((params.tools.is_array() && params.json_schema.is_object())) {
  2372. return common_chat_params_init_generic(tmpl, params);
  2373. }
  2374. // Functionary prepends "all\n" to plain content outputs, so we use its handler in all cases.
  2375. if (src.find(">>>all") != std::string::npos) {
  2376. return common_chat_params_init_functionary_v3_2(tmpl, params);
  2377. }
  2378. // Firefunction v2 requires datetime and functions in the context even w/o tools, so we also use its handler in all cases.
  2379. if (src.find(" functools[") != std::string::npos) {
  2380. return common_chat_params_init_firefunction_v2(tmpl, params);
  2381. }
  2382. // Functionary v3.1 (w/ tools)
  2383. if (src.find("<|start_header_id|>") != std::string::npos
  2384. && src.find("<function=") != std::string::npos) {
  2385. return common_chat_params_init_functionary_v3_1_llama_3_1(tmpl, params);
  2386. }
  2387. // Llama 3.1, 3.2, 3.3 (also requires date_string so using it even w/o tools)
  2388. if (src.find("<|start_header_id|>ipython<|end_header_id|>") != std::string::npos) {
  2389. auto allow_python_tag_builtin_tools = src.find("<|python_tag|>") != std::string::npos;
  2390. return common_chat_params_init_llama_3_x(tmpl, params, allow_python_tag_builtin_tools);
  2391. }
  2392. // Ministral/Mistral Large 3
  2393. if (src.find("[SYSTEM_PROMPT]") != std::string::npos &&
  2394. src.find("[TOOL_CALLS]") != std::string::npos &&
  2395. src.find("[ARGS]") != std::string::npos) {
  2396. return common_chat_params_init_ministral_3(tmpl, params);
  2397. }
  2398. if (src.find("[THINK]") != std::string::npos && src.find("[/THINK]") != std::string::npos) {
  2399. return common_chat_params_init_magistral(tmpl, params);
  2400. }
  2401. // Plain handler (no tools)
  2402. if (params.tools.is_null() || inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_NONE) {
  2403. return common_chat_params_init_without_tools(tmpl, params);
  2404. }
  2405. // Mistral Nemo (w/ tools)
  2406. if (src.find("[TOOL_CALLS]") != std::string::npos) {
  2407. return common_chat_params_init_mistral_nemo(tmpl, params);
  2408. }
  2409. // Generic fallback
  2410. return common_chat_params_init_generic(tmpl, params);
  2411. }
  2412. // Legacy template route (adhoc C++ implementation of known templates), forward to llama_chat_apply_template.
  2413. static common_chat_params common_chat_templates_apply_legacy(
  2414. const struct common_chat_templates * tmpls,
  2415. const struct common_chat_templates_inputs & inputs)
  2416. {
  2417. size_t alloc_size = 0;
  2418. std::vector<llama_chat_message> chat;
  2419. std::vector<std::string> contents;
  2420. for (const auto & msg : inputs.messages) {
  2421. auto content = msg.content;
  2422. for (const auto & part : msg.content_parts) {
  2423. if (part.type != "text") {
  2424. LOG_WRN("Ignoring non-text content part: %s\n", part.type.c_str());
  2425. continue;
  2426. }
  2427. if (!content.empty()) {
  2428. content += "\n";;
  2429. }
  2430. content += part.text;
  2431. }
  2432. contents.emplace_back(std::move(content));
  2433. }
  2434. for (size_t i = 0; i < contents.size(); ++i) {
  2435. const auto & msg = inputs.messages[i];
  2436. const auto & content = contents[i];
  2437. chat.push_back({msg.role.c_str(), content.c_str()});
  2438. size_t msg_size = msg.role.size() + content.size();
  2439. alloc_size += msg_size + (msg_size / 4); // == msg_size * 1.25 but avoiding float ops
  2440. }
  2441. std::vector<char> buf(alloc_size);
  2442. // run the first time to get the total output length
  2443. const auto & src = tmpls->template_default->source();
  2444. int32_t res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());
  2445. // error: chat template is not supported
  2446. if (res < 0) {
  2447. // if the custom "tmpl" is not supported, we throw an error
  2448. // this is a bit redundant (for good), since we're not sure if user validated the custom template with llama_chat_verify_template()
  2449. throw std::runtime_error("this custom template is not supported, try using --jinja");
  2450. }
  2451. // if it turns out that our buffer is too small, we resize it
  2452. if ((size_t) res > buf.size()) {
  2453. buf.resize(res);
  2454. res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());
  2455. }
  2456. // for safety, we check the result again
  2457. if (res < 0 || (size_t) res > buf.size()) {
  2458. throw std::runtime_error("failed to apply chat template, try using --jinja");
  2459. }
  2460. common_chat_params params;
  2461. params.prompt = std::string(buf.data(), res);
  2462. if (!inputs.json_schema.empty()) {
  2463. params.grammar = json_schema_to_grammar(json::parse(inputs.json_schema));
  2464. } else {
  2465. params.grammar = inputs.grammar;
  2466. }
  2467. return params;
  2468. }
  2469. common_chat_params common_chat_templates_apply(
  2470. const struct common_chat_templates * tmpls,
  2471. const struct common_chat_templates_inputs & inputs)
  2472. {
  2473. GGML_ASSERT(tmpls != nullptr);
  2474. return inputs.use_jinja
  2475. ? common_chat_templates_apply_jinja(tmpls, inputs)
  2476. : common_chat_templates_apply_legacy(tmpls, inputs);
  2477. }