#include "chat.h"
#include "chat-parser.h"
#include "common.h"
#include "json-partial.h"
#include "json-schema-to-grammar.h"
#include "log.h"
#include "regex-partial.h"

#include <minja/chat-template.hpp>
#include <minja/minja.hpp>

#include <algorithm>
#include <cstdio>
#include <cctype>
#include <exception>
#include <functional>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

using json = nlohmann::ordered_json;

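// Render `now` in the local timezone using a strftime-style format string,
// e.g. format_time(now, "%d %b %Y") might produce "26 Jul 2025".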
static std::string format_time(const std::chrono::system_clock::time_point & now, const std::string & format) {
    auto time = std::chrono::system_clock::to_time_t(now);
    auto local_time = *std::localtime(&time);
    std::ostringstream ss;
    ss << std::put_time(&local_time, format.c_str());
    auto res = ss.str();
    return res;
}

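// Returns the suffix of `current` that was appended since `last`,
// e.g. string_diff("Hello", "Hello world") == " world".
// Returns "" when `current` is a prefix of `last` (the previous pass ended on a partial stop word).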
static std::string string_diff(const std::string & last, const std::string & current) {
    if (last.empty()) {
        return current;
    }
    if (!string_starts_with(current, last)) {
        if (string_starts_with(last, current)) {
            // This happens if the last generation ended on a partial stop word (not erased),
            // and the current ended on a stop word (erased).
            return "";
        }
        throw std::runtime_error("Invalid diff: '" + last + "' not found at start of '" + current + "'");
    }
    return current.substr(last.size());
}

static bool has_content_or_tool_calls(const common_chat_msg & msg) {
    return !msg.content.empty() || !msg.tool_calls.empty();
}

template <>
json common_chat_msg::to_json_oaicompat() const
{
    json message {
        {"role", "assistant"},
    };
    if (!reasoning_content.empty()) {
        message["reasoning_content"] = reasoning_content;
    }
    if (content.empty() && !tool_calls.empty()) {
        message["content"] = json();
    } else {
        message["content"] = content;
    }
    if (!tool_calls.empty()) {
        auto arr = json::array();
        for (const auto & tc : tool_calls) {
            arr.push_back({
                {"type", "function"},
                {"function", {
                    {"name", tc.name},
                    {"arguments", tc.arguments},
                }},
                {"id", tc.id},
                // // Some templates generate and require an id (sometimes in a very specific format, e.g. Mistral Nemo).
                // // We only generate a random id for the ones that don't generate one by themselves
                // // (they also won't get to see it as their template likely doesn't use it, so it's all for the client)
                // {"id", tc.id.empty() ? gen_tool_call_id() : tc.id},
            });
        }
        message["tool_calls"] = arr;
    }
    return message;
}

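// Computes the incremental deltas (reasoning, content and tool-call fragments) between two
// successive parses of the same streamed assistant message; only the last previously seen
// tool call and any newly appeared ones can change.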
std::vector<common_chat_msg_diff> common_chat_msg_diff::compute_diffs(const common_chat_msg & msg_prv, const common_chat_msg & msg_new) {
    std::vector<common_chat_msg_diff> diffs;
    if (msg_new.tool_calls.size() > msg_prv.tool_calls.size()) {
        diffs.reserve(msg_new.tool_calls.size() - msg_prv.tool_calls.size() + 3);
    } else {
        diffs.reserve(3);
    }
    // TODO: these can become expensive for long messages - how to optimize?
    if (msg_prv.reasoning_content != msg_new.reasoning_content) {
        auto & diff = diffs.emplace_back();
        diff.reasoning_content_delta = string_diff(msg_prv.reasoning_content, msg_new.reasoning_content);
    }
    if (msg_prv.content != msg_new.content) {
        auto & diff = diffs.emplace_back();
        diff.content_delta = string_diff(msg_prv.content, msg_new.content);
    }

    if (msg_new.tool_calls.size() < msg_prv.tool_calls.size()) {
        throw std::runtime_error("Invalid diff: now finding less tool calls!");
    }

    if (!msg_prv.tool_calls.empty()) {
        const auto idx = msg_prv.tool_calls.size() - 1;
        const auto & pref = msg_prv.tool_calls[idx];
        const auto & newf = msg_new.tool_calls[idx];
        if (pref.name != newf.name) {
            throw std::runtime_error("Invalid diff: tool call mismatch!");
        }
        const auto args_diff = string_diff(pref.arguments, newf.arguments);
        if (!args_diff.empty() || pref.id != newf.id) {
            auto & diff = diffs.emplace_back();
            diff.tool_call_index = idx;
            if (pref.id != newf.id) {
                diff.tool_call_delta.id = newf.id;
                diff.tool_call_delta.name = newf.name;
            }
            diff.tool_call_delta.arguments = args_diff;
        }
    }
    for (size_t idx = msg_prv.tool_calls.size(); idx < msg_new.tool_calls.size(); ++idx) {
        auto & diff = diffs.emplace_back();
        diff.tool_call_index = idx;
        diff.tool_call_delta = msg_new.tool_calls[idx];
    }
    return diffs;
}

typedef minja::chat_template common_chat_template;

struct common_chat_templates {
    bool add_bos;
    bool add_eos;
    bool has_explicit_template; // Model had builtin template or template override was specified.
    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
    std::unique_ptr<common_chat_template> template_tool_use;
};

struct templates_params {
    json messages;
    json tools;
    common_chat_tool_choice tool_choice;
    json json_schema;
    bool parallel_tool_calls;
    bool stream;
    std::string grammar;
    bool add_generation_prompt = true;
    bool enable_thinking = true;
    std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
    json extra_context;
    bool add_bos;
    bool add_eos;
    bool is_inference = true;
};

common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice) {
    if (tool_choice == "auto") {
        return COMMON_CHAT_TOOL_CHOICE_AUTO;
    }
    if (tool_choice == "none") {
        return COMMON_CHAT_TOOL_CHOICE_NONE;
    }
    if (tool_choice == "required") {
        return COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    }
    throw std::invalid_argument("Invalid tool_choice: " + tool_choice);
}

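// A template is considered to support `enable_thinking` if toggling the flag
// changes the rendered prompt for a trivial single-message conversation.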
bool common_chat_templates_support_enable_thinking(const common_chat_templates * chat_templates) {
    common_chat_templates_inputs dummy_inputs;
    common_chat_msg msg;
    msg.role = "user";
    msg.content = "test";
    dummy_inputs.messages = {msg};
    dummy_inputs.enable_thinking = false;
    const auto rendered_no_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
    dummy_inputs.enable_thinking = true;
    const auto rendered_with_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
    return rendered_no_thinking.prompt != rendered_with_thinking.prompt;
}

template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const json & messages) {
    std::vector<common_chat_msg> msgs;

    try {
        if (!messages.is_array()) {
            throw std::invalid_argument("Expected 'messages' to be an array, got " + messages.dump());
        }

        for (const auto & message : messages) {
            if (!message.is_object()) {
                throw std::invalid_argument("Expected 'message' to be an object, got " + message.dump());
            }

            common_chat_msg msg;
            if (!message.contains("role")) {
                throw std::invalid_argument("Missing 'role' in message: " + message.dump());
            }
            msg.role = message.at("role");

            auto has_content = message.contains("content");
            auto has_tool_calls = message.contains("tool_calls");
            if (has_content) {
                const auto & content = message.at("content");
                if (content.is_string()) {
                    msg.content = content;
                } else if (content.is_array()) {
                    for (const auto & part : content) {
                        if (!part.contains("type")) {
                            throw std::invalid_argument("Missing content part type: " + part.dump());
                        }
                        const auto & type = part.at("type");
                        if (type != "text") {
                            throw std::invalid_argument("Unsupported content part type: " + type.dump());
                        }
                        common_chat_msg_content_part msg_part;
                        msg_part.type = type;
                        msg_part.text = part.at("text");
                        msg.content_parts.push_back(msg_part);
                    }
                } else if (!content.is_null()) {
                    throw std::invalid_argument("Invalid 'content' type: expected string or array, got " + content.dump() + " (ref: https://github.com/ggml-org/llama.cpp/issues/8367)");
                }
            }
            if (has_tool_calls) {
                for (const auto & tool_call : message.at("tool_calls")) {
                    common_chat_tool_call tc;
                    if (!tool_call.contains("type")) {
                        throw std::invalid_argument("Missing tool call type: " + tool_call.dump());
                    }
                    const auto & type = tool_call.at("type");
                    if (type != "function") {
                        throw std::invalid_argument("Unsupported tool call type: " + tool_call.dump());
                    }
                    if (!tool_call.contains("function")) {
                        throw std::invalid_argument("Missing tool call function: " + tool_call.dump());
                    }
                    const auto & fc = tool_call.at("function");
                    if (!fc.contains("name")) {
                        throw std::invalid_argument("Missing tool call name: " + tool_call.dump());
                    }
                    tc.name = fc.at("name");
                    tc.arguments = fc.at("arguments");
                    if (tool_call.contains("id")) {
                        tc.id = tool_call.at("id");
                    }
                    msg.tool_calls.push_back(tc);
                }
            }
            if (!has_content && !has_tool_calls) {
                throw std::invalid_argument("Expected 'content' or 'tool_calls' (ref: https://github.com/ggml-org/llama.cpp/issues/8367 & https://github.com/ggml-org/llama.cpp/issues/12279)");
            }
            if (message.contains("reasoning_content")) {
                msg.reasoning_content = message.at("reasoning_content");
            }
            if (message.contains("name")) {
                msg.tool_name = message.at("name");
            }
            if (message.contains("tool_call_id")) {
                msg.tool_call_id = message.at("tool_call_id");
            }

            msgs.push_back(msg);
        }
    } catch (const std::exception & e) {
        // @ngxson : disable otherwise it's bloating the API response
        // printf("%s\n", std::string("; messages = ") + messages.dump(2));
        throw std::runtime_error("Failed to parse messages: " + std::string(e.what()));
    }

    return msgs;
}

template <>
json common_chat_msgs_to_json_oaicompat(const std::vector<common_chat_msg> & msgs, bool concat_typed_text) {
    json messages = json::array();
    for (const auto & msg : msgs) {
        if (!msg.content.empty() && !msg.content_parts.empty()) {
            throw std::runtime_error("Cannot specify both content and content_parts");
        }
        json jmsg {
            {"role", msg.role},
        };
        if (!msg.content.empty()) {
            jmsg["content"] = msg.content;
        } else if (!msg.content_parts.empty()) {
            if (concat_typed_text) {
                std::string text;
                for (const auto & part : msg.content_parts) {
                    if (part.type != "text") {
                        LOG_WRN("Ignoring content part type: %s\n", part.type.c_str());
                        continue;
                    }
                    if (!text.empty()) {
                        text += '\n';
                    }
                    text += part.text;
                }
                jmsg["content"] = text;
            } else {
                auto & parts = jmsg["content"] = json::array();
                for (const auto & part : msg.content_parts) {
                    parts.push_back({
                        {"type", part.type},
                        {"text", part.text},
                    });
                }
            }
        } else {
            jmsg["content"] = json(); // null
        }
        if (!msg.reasoning_content.empty()) {
            jmsg["reasoning_content"] = msg.reasoning_content;
        }
        if (!msg.tool_name.empty()) {
            jmsg["name"] = msg.tool_name;
        }
        if (!msg.tool_call_id.empty()) {
            jmsg["tool_call_id"] = msg.tool_call_id;
        }
        if (!msg.tool_calls.empty()) {
            auto & tool_calls = jmsg["tool_calls"] = json::array();
            for (const auto & tool_call : msg.tool_calls) {
                json tc {
                    {"type", "function"},
                    {"function", {
                        {"name", tool_call.name},
                        {"arguments", tool_call.arguments},
                    }},
                };
                if (!tool_call.id.empty()) {
                    tc["id"] = tool_call.id;
                }
                tool_calls.push_back(tc);
            }
        }
        messages.push_back(jmsg);
    }
    return messages;
}

template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const std::string & messages) {
    return common_chat_msgs_parse_oaicompat(json::parse(messages));
}

template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const json & tools) {
    std::vector<common_chat_tool> result;

    try {
        if (!tools.is_null()) {
            if (!tools.is_array()) {
                throw std::invalid_argument("Expected 'tools' to be an array, got " + tools.dump());
            }
            for (const auto & tool : tools) {
                if (!tool.contains("type")) {
                    throw std::invalid_argument("Missing tool type: " + tool.dump());
                }
                const auto & type = tool.at("type");
                if (!type.is_string() || type != "function") {
                    throw std::invalid_argument("Unsupported tool type: " + tool.dump());
                }
                if (!tool.contains("function")) {
                    throw std::invalid_argument("Missing tool function: " + tool.dump());
                }

                const auto & function = tool.at("function");
                result.push_back({
                    /* .name = */ function.at("name"),
                    /* .description = */ function.at("description"),
                    /* .parameters = */ function.at("parameters").dump(),
                });
            }
        }
    } catch (const std::exception & e) {
        throw std::runtime_error("Failed to parse tools: " + std::string(e.what()) + "; tools = " + tools.dump(2));
    }

    return result;
}

template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const std::string & tools) {
    return common_chat_tools_parse_oaicompat(json::parse(tools));
}

template <>
json common_chat_tools_to_json_oaicompat(const std::vector<common_chat_tool> & tools) {
    if (tools.empty()) {
        return json();
    }

    auto result = json::array();
    for (const auto & tool : tools) {
        result.push_back({
            {"type", "function"},
            {"function", {
                {"name", tool.name},
                {"description", tool.description},
                {"parameters", json::parse(tool.parameters)},
            }},
        });
    }
    return result;
}

template <> json common_chat_msg_diff_to_json_oaicompat(const common_chat_msg_diff & diff) {
    json delta = json::object();
    if (!diff.reasoning_content_delta.empty()) {
        delta["reasoning_content"] = diff.reasoning_content_delta;
    }
    if (!diff.content_delta.empty()) {
        delta["content"] = diff.content_delta;
    }
    if (diff.tool_call_index != std::string::npos) {
        json tool_call;
        tool_call["index"] = diff.tool_call_index;
        if (!diff.tool_call_delta.id.empty()) {
            tool_call["id"] = diff.tool_call_delta.id;
            tool_call["type"] = "function";
        }
        json function = json::object();
        if (!diff.tool_call_delta.name.empty()) {
            function["name"] = diff.tool_call_delta.name;
        }
        function["arguments"] = diff.tool_call_delta.arguments;
        tool_call["function"] = function;
        delta["tool_calls"] = json::array({tool_call});
    }
    return delta;
}

bool common_chat_verify_template(const std::string & tmpl, bool use_jinja) {
    if (use_jinja) {
        try {
            common_chat_msg msg;
            msg.role = "user";
            msg.content = "test";

            auto tmpls = common_chat_templates_init(/* model= */ nullptr, tmpl);

            common_chat_templates_inputs inputs;
            inputs.messages = {msg};

            common_chat_templates_apply(tmpls.get(), inputs);
            return true;
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to apply template: %s\n", __func__, e.what());
            return false;
        }
    }
    llama_chat_message chat[] = {{"user", "test"}};
    const int res = llama_chat_apply_template(tmpl.c_str(), chat, 1, true, nullptr, 0);
    return res >= 0;
}

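// Formats a single new message by rendering the conversation with and without it
// and returning only the appended part, so callers can extend an already formatted history.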
std::string common_chat_format_single(
        const struct common_chat_templates * tmpls,
        const std::vector<common_chat_msg> & past_msg,
        const common_chat_msg & new_msg,
        bool add_ass,
        bool use_jinja) {

    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;
    inputs.add_bos = tmpls->add_bos;
    inputs.add_eos = tmpls->add_eos;

    std::string fmt_past_msg;
    if (!past_msg.empty()) {
        inputs.messages = past_msg;
        inputs.add_generation_prompt = false;
        fmt_past_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    }
    std::ostringstream ss;
    // if the past_msg ends with a newline, we must preserve it in the formatted version
    if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
        ss << "\n";
    };
    // format chat with new_msg
    inputs.messages.push_back(new_msg);
    inputs.add_generation_prompt = add_ass;
    auto fmt_new_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    // get the diff part
    ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return ss.str();
}

std::string common_chat_format_example(const struct common_chat_templates * tmpls, bool use_jinja, const std::map<std::string, std::string> & chat_template_kwargs) {
    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;
    inputs.add_bos = tmpls->add_bos;
    inputs.add_eos = tmpls->add_eos;
    inputs.chat_template_kwargs = chat_template_kwargs;
    auto add_simple_msg = [&](auto role, auto content) {
        common_chat_msg msg;
        msg.role = role;
        msg.content = content;
        inputs.messages.push_back(msg);
    };
    add_simple_msg("system", "You are a helpful assistant");
    add_simple_msg("user", "Hello");
    add_simple_msg("assistant", "Hi there");
    add_simple_msg("user", "How are you?");
    return common_chat_templates_apply(tmpls, inputs).prompt;
}

#define CHATML_TEMPLATE_SRC \
    "{%- for message in messages -%}\n" \
    " {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' -}}\n" \
    "{%- endfor -%}\n" \
    "{%- if add_generation_prompt -%}\n" \
    " {{- '<|im_start|>assistant\n' -}}\n" \
    "{%- endif -%}"

void common_chat_templates_free(struct common_chat_templates * tmpls) {
    delete tmpls;
}

bool common_chat_templates_was_explicit(const struct common_chat_templates * tmpls) {
    return tmpls->has_explicit_template;
}

const char * common_chat_templates_source(const struct common_chat_templates * tmpls, const char * variant) {
    if (variant != nullptr) {
        if (strcmp(variant, "tool_use") == 0) {
            if (tmpls->template_tool_use) {
                return tmpls->template_tool_use->source().c_str();
            }
            return nullptr;
        } else {
            LOG_DBG("%s: unknown template variant: %s\n", __func__, variant);
        }
    }
    return tmpls->template_default->source().c_str();
}

common_chat_templates_ptr common_chat_templates_init(
        const struct llama_model * model,
        const std::string & chat_template_override,
        const std::string & bos_token_override,
        const std::string & eos_token_override)
{
    std::string default_template_src;
    std::string template_tool_use_src;

    bool has_explicit_template = !chat_template_override.empty();
    if (chat_template_override.empty()) {
        GGML_ASSERT(model != nullptr);
        const auto * str = llama_model_chat_template(model, /* name */ nullptr);
        if (str) {
            default_template_src = str;
            has_explicit_template = true;
        }
        str = llama_model_chat_template(model, /* name */ "tool_use");
        if (str) {
            template_tool_use_src = str;
            has_explicit_template = true;
        }
    } else {
        default_template_src = chat_template_override;
    }
    if (default_template_src.empty() || default_template_src == "chatml") {
        if (!template_tool_use_src.empty()) {
            default_template_src = template_tool_use_src;
        } else {
            default_template_src = CHATML_TEMPLATE_SRC;
        }
    }

    // TODO @ngxson : this is a temporary hack to prevent chat template from throwing an error
    // Ref: https://github.com/ggml-org/llama.cpp/pull/15230#issuecomment-3173959633
    if (default_template_src.find("<|channel|>") != std::string::npos
        // search for the error message and patch it
        && default_template_src.find("in message.content or") != std::string::npos) {
        string_replace_all(default_template_src,
            "{%- if \"<|channel|>analysis<|message|>\" in message.content or \"<|channel|>final<|message|>\" in message.content %}",
            "{%- if false %}");
    }

    std::string token_bos = bos_token_override;
    std::string token_eos = eos_token_override;
    bool add_bos = false;
    bool add_eos = false;
    if (model) {
        const auto * vocab = llama_model_get_vocab(model);
        const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
            if (token == LLAMA_TOKEN_NULL) {
                if (default_template_src.find(jinja_variable_name) != std::string::npos
                    || template_tool_use_src.find(jinja_variable_name) != std::string::npos) {
                    LOG_WRN("common_chat_templates_init: warning: vocab does not have a %s token, jinja template won't work as intended.\n", name);
                }
                return std::string();
            }
            return common_token_to_piece(vocab, token, true);
        };
        token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
        token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
        add_bos = llama_vocab_get_add_bos(vocab);
        add_eos = llama_vocab_get_add_eos(vocab);
    }
    common_chat_templates_ptr tmpls(new common_chat_templates());
    tmpls->has_explicit_template = has_explicit_template;
    tmpls->add_bos = add_bos;
    tmpls->add_eos = add_eos;
    try {
        tmpls->template_default = std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos);
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to parse chat template (defaulting to chatml): %s \n", __func__, e.what());
        tmpls->template_default = std::make_unique<minja::chat_template>(CHATML_TEMPLATE_SRC, token_bos, token_eos);
    }
    if (!template_tool_use_src.empty()) {
        try {
            tmpls->template_tool_use = std::make_unique<minja::chat_template>(template_tool_use_src, token_bos, token_eos);
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to parse tool use chat template (ignoring it): %s\n", __func__, e.what());
        }
    }
    return tmpls;
}

const char * common_chat_format_name(common_chat_format format) {
    switch (format) {
        case COMMON_CHAT_FORMAT_CONTENT_ONLY: return "Content-only";
        case COMMON_CHAT_FORMAT_GENERIC: return "Generic";
        case COMMON_CHAT_FORMAT_MISTRAL_NEMO: return "Mistral Nemo";
        case COMMON_CHAT_FORMAT_MAGISTRAL: return "Magistral";
        case COMMON_CHAT_FORMAT_LLAMA_3_X: return "Llama 3.x";
        case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return "Llama 3.x with builtin tools";
        case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "DeepSeek R1";
        case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return "FireFunction v2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1";
        case COMMON_CHAT_FORMAT_DEEPSEEK_V3_1: return "DeepSeek V3.1";
        case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro";
        case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
        case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
        case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
        case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
        case COMMON_CHAT_FORMAT_NEMOTRON_V2: return "Nemotron V2";
        case COMMON_CHAT_FORMAT_APERTUS: return "Apertus";
        case COMMON_CHAT_FORMAT_LFM2_WITH_JSON_TOOLS: return "LFM2 with JSON tools";
        case COMMON_CHAT_FORMAT_MINIMAX_M2: return "MiniMax-M2";
        case COMMON_CHAT_FORMAT_GLM_4_5: return "GLM 4.5";
        case COMMON_CHAT_FORMAT_KIMI_K2: return "Kimi K2";
        case COMMON_CHAT_FORMAT_QWEN3_CODER_XML: return "Qwen3 Coder";
        case COMMON_CHAT_FORMAT_APRIEL_1_5: return "Apriel 1.5";
        case COMMON_CHAT_FORMAT_XIAOMI_MIMO: return "Xiaomi MiMo";
        case COMMON_CHAT_FORMAT_PEG_SIMPLE: return "peg-simple";
        case COMMON_CHAT_FORMAT_PEG_NATIVE: return "peg-native";
        case COMMON_CHAT_FORMAT_PEG_CONSTRUCTED: return "peg-constructed";
        default:
            throw std::runtime_error("Unknown chat format");
    }
}

const char * common_reasoning_format_name(common_reasoning_format format) {
    switch (format) {
        case COMMON_REASONING_FORMAT_NONE: return "none";
        case COMMON_REASONING_FORMAT_AUTO: return "auto";
        case COMMON_REASONING_FORMAT_DEEPSEEK: return "deepseek";
        case COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY: return "deepseek-legacy";
        default:
            throw std::runtime_error("Unknown reasoning format");
    }
}

common_reasoning_format common_reasoning_format_from_name(const std::string & format) {
    if (format == "none") {
        return COMMON_REASONING_FORMAT_NONE;
    } else if (format == "auto") {
        return COMMON_REASONING_FORMAT_AUTO;
    } else if (format == "deepseek") {
        return COMMON_REASONING_FORMAT_DEEPSEEK;
    } else if (format == "deepseek-legacy") {
        return COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY;
    }
    throw std::runtime_error("Unknown reasoning format: " + format);
}

static void foreach_function(const json & tools, const std::function<void(const json &)> & fn) {
    for (const auto & tool : tools) {
        if (!tool.contains("type") || tool.at("type") != "function" || !tool.contains("function")) {
            LOG_INF("Skipping tool without function: %s", tool.dump(2).c_str());
            continue;
        }
        fn(tool);
    }
}

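// Renders the chat through the minja template, optionally overriding messages/tools and merging
// additional context; a leading BOS / trailing EOS token is stripped when the vocab adds it itself.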
static std::string apply(
        const common_chat_template & tmpl,
        const struct templates_params & inputs,
        const std::optional<json> & messages_override = std::nullopt,
        const std::optional<json> & tools_override = std::nullopt,
        const std::optional<json> & additional_context = std::nullopt)
{
    minja::chat_template_inputs tmpl_inputs;
    tmpl_inputs.messages = messages_override ? *messages_override : inputs.messages;
    if (tools_override) {
        tmpl_inputs.tools = *tools_override;
    } else {
        tmpl_inputs.tools = inputs.tools.empty() ? json() : inputs.tools;
    }
    tmpl_inputs.add_generation_prompt = inputs.add_generation_prompt;
    tmpl_inputs.extra_context = inputs.extra_context;
    tmpl_inputs.extra_context["enable_thinking"] = inputs.enable_thinking;
    if (additional_context) {
        tmpl_inputs.extra_context.merge_patch(*additional_context);
    }
    // TODO: add flag to control date/time, if only for testing purposes.
    // tmpl_inputs.now = std::chrono::system_clock::now();

    minja::chat_template_options tmpl_opts;
    // To avoid double BOS / EOS tokens, we're manually removing beginning / trailing tokens
    // instead of using `chat_template_options.use_bos_token = false`, since these tokens
    // may be needed inside the template / between messages too.
    auto result = tmpl.apply(tmpl_inputs, tmpl_opts);
    if (inputs.add_bos && string_starts_with(result, tmpl.bos_token())) {
        result = result.substr(tmpl.bos_token().size());
    }
    if (inputs.add_eos && string_ends_with(result, tmpl.eos_token())) {
        result = result.substr(0, result.size() - tmpl.eos_token().size());
    }
    return result;
}

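// Generic (template-agnostic) tool-call handling: constrain the output to a JSON object carrying
// either tool call(s) or a plain `response`, and inject a system message describing that format.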
static common_chat_params common_chat_params_init_generic(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    auto tool_call_schemas = json::array();
    foreach_function(inputs.tools, [&](const json & tool) {
        const auto & function = tool.at("function");
        auto tool_schema = json {
            {"type", "object"},
            {"properties", {
                {"name", {
                    {"type", "string"},
                    {"const", function.at("name")},
                }},
                {"arguments", function.at("parameters")},
            }},
            {"required", json::array({"name", "arguments"})},
        };
        if (function.contains("description")) {
            tool_schema["description"] = function.at("description");
        }
        if (inputs.parallel_tool_calls) {
            tool_schema.at("properties")["id"] = {
                {"type", "string"},
                {"minLength", 4},
            };
            tool_schema.at("required").push_back("id");
        }
        tool_call_schemas.emplace_back(tool_schema);
    });
    const auto tool_call =
        inputs.parallel_tool_calls
            ? json {
                {"type", "object"},
                {"properties", {
                    {"tool_calls", {
                        {"type", "array"},
                        {"items", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                            {"anyOf", tool_call_schemas},
                        }},
                        {"minItems", 1},
                    }},
                }},
                {"required", json::array({"tool_calls"})},
            }
            : json {
                {"type", "object"},
                {"properties", {
                    {"tool_call", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                        {"anyOf", tool_call_schemas},
                    }},
                }},
                {"required", json::array({"tool_call"})},
            };
    const auto schema =
        inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED
            ? json {
                {"anyOf", json::array({
                    tool_call,
                    {
                        {"type", "object"},
                        {"properties", {
                            {"response", inputs.json_schema.is_null()
                                ? json {{"type", "string"}}
                                : inputs.json_schema
                            },
                        }},
                        {"required", json::array({"response"})},
                    },
                })}
            }
            : tool_call;

    data.grammar_lazy = false;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        builder.add_schema("root", schema);
    });

    auto tweaked_messages = common_chat_template::add_system(
        inputs.messages,
        "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request");

    data.prompt = apply(tmpl, inputs, /* messages_override= */ tweaked_messages);
    data.format = COMMON_CHAT_FORMAT_GENERIC;
    return data;
}

static common_chat_params common_chat_params_init_mistral_nemo(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        auto schemas = json::array();
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            schemas.push_back({
                {"type", "object"},
                {"properties", {
                    // Important note: the model is probably trained to take a JSON stringified arguments value.
                    // It's hard to constrain that for now (while reusing the JSON schema conversion), so we're just expecting a plain object.
                    {"name", {
                        {"type", "string"},
                        {"const", function.at("name")},
                    }},
                    {"arguments", function.at("parameters")},
                    {"id", {
                        {"type", "string"},
                        // Nemo's template expects a 9-character alphanumeric ID.
                        {"pattern", "^[a-zA-Z0-9]{9}$"},
                    }},
                }},
                {"required", json::array({"name", "arguments", "id"})},
            });
        });
        auto schema = json {
            {"type", "array"},
            {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
            {"minItems", 1},
        };
        if (!inputs.parallel_tool_calls) {
            schema["maxItems"] = 1;
        }
        builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema));
    });
    data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"});
    data.preserved_tokens = {
        "[TOOL_CALLS]",
    };
    data.prompt = apply(tmpl, inputs);
    data.format = COMMON_CHAT_FORMAT_MISTRAL_NEMO;
    return data;
}

// Case-insensitive find
static size_t ifind_string(const std::string & haystack, const std::string & needle, size_t pos = 0) {
    auto it = std::search(
        haystack.begin() + pos, haystack.end(),
        needle.begin(), needle.end(),
        [](char a, char b) { return std::tolower(a) == std::tolower(b); }
    );
    return (it == haystack.end()) ? std::string::npos : std::distance(haystack.begin(), it);
}

static common_chat_params common_chat_params_init_lfm2(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    const auto is_json_schema_provided = !inputs.json_schema.is_null();
    const auto is_grammar_provided = !inputs.grammar.empty();
    const auto are_tools_provided = inputs.tools.is_array() && !inputs.tools.empty();

    // the logic requires potentially modifying the messages
    auto tweaked_messages = inputs.messages;

    auto replace_json_schema_marker = [](json & messages) -> bool {
        static std::string marker1 = "force json schema.\n";
        static std::string marker2 = "force json schema.";

        if (messages.empty() || messages.at(0).at("role") != "system") {
            return false;
        }

        std::string content = messages.at(0).at("content");

        for (const auto & marker : {marker1, marker2}) {
            const auto pos = ifind_string(content, marker);
            if (pos != std::string::npos) {
                content.replace(pos, marker.length(), "");
                // inject modified content back into the messages
                messages.at(0).at("content") = content;
                return true;
            }
        }

        return false;
    };

    // Lfm2 model does not natively work with json, but can generally understand the tools structure
    //
    // Example of the pytorch dialog structure:
    // <|startoftext|><|im_start|>system
    // List of tools: <|tool_list_start|>[{"name": "get_candidate_status", "description": "Retrieves the current status of a candidate in the recruitment process", "parameters": {"type": "object", "properties": {"candidate_id": {"type": "string", "description": "Unique identifier for the candidate"}}, "required": ["candidate_id"]}}]<|tool_list_end|><|im_end|>
    // <|im_start|>user
    // What is the current status of candidate ID 12345?<|im_end|>
    // <|im_start|>assistant
    // <|tool_call_start|>[get_candidate_status(candidate_id="12345")]<|tool_call_end|>Checking the current status of candidate ID 12345.<|im_end|>
    // <|im_start|>tool
    // <|tool_response_start|>{"candidate_id": "12345", "status": "Interview Scheduled", "position": "Clinical Research Associate", "date": "2023-11-20"}<|tool_response_end|><|im_end|>
    // <|im_start|>assistant
    // The candidate with ID 12345 is currently in the "Interview Scheduled" stage for the position of Clinical Research Associate, with an interview date set for 2023-11-20.<|im_end|>
    //
    // For the llama server compatibility with json tools semantic,
    // the client can add "Follow json schema." line into the system message prompt to force the json output.
    //
    if (are_tools_provided && (is_json_schema_provided || is_grammar_provided)) {
        // server/utils.hpp prohibits that branch for the custom grammar anyways
        throw std::runtime_error("Tools call must not use \"json_schema\" or \"grammar\", use non-tool invocation if you want to use custom grammar");
    } else if (are_tools_provided && replace_json_schema_marker(tweaked_messages)) {
        LOG_INF("%s: Using tools to build a grammar\n", __func__);

        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            auto schemas = json::array();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                schemas.push_back({
                    {"type", "object"},
                    {"properties", {
                        {"name", {
                            {"type", "string"},
                            {"const", function.at("name")},
                        }},
                        {"arguments", function.at("parameters")},
                    }},
                    {"required", json::array({"name", "arguments", "id"})},
                });
            });
            auto schema = json {
                {"type", "array"},
                {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
                {"minItems", 1},
            };
            if (!inputs.parallel_tool_calls) {
                schema["maxItems"] = 1;
            }
            builder.add_rule("root", "\"<|tool_call_start|>\"" + builder.add_schema("tool_calls", schema) + "\"<|tool_call_end|>\"");
        });
        // model has no concept of tool selection mode choice,
        // if the system prompt rendered correctly it will produce a tool call
        // the grammar goes inside the tool call body
        data.grammar_lazy = true;
        data.grammar_triggers = {{COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL, "\\s*<\\|tool_call_start\\|>\\s*\\["}};
        data.preserved_tokens = {"<|tool_call_start|>", "<|tool_call_end|>"};
        data.format = COMMON_CHAT_FORMAT_LFM2_WITH_JSON_TOOLS;
    } else if (are_tools_provided && (!is_json_schema_provided && !is_grammar_provided)) {
        LOG_INF("%s: Using tools without json schema or grammar\n", __func__);
        // output those tokens
        data.preserved_tokens = {"<|tool_call_start|>", "<|tool_call_end|>"};
    } else if (is_json_schema_provided) {
        LOG_INF("%s: Using provided json schema to build a grammar\n", __func__);
        data.grammar = json_schema_to_grammar(inputs.json_schema);
    } else if (is_grammar_provided) {
        LOG_INF("%s: Using provided grammar\n", __func__);
        data.grammar = inputs.grammar;
    } else {
        LOG_INF("%s: Using content relying on the template\n", __func__);
    }

    data.prompt = apply(tmpl, inputs, /* messages_override= */ tweaked_messages);
    LOG_DBG("%s: Prompt: %s\n", __func__, data.prompt.c_str());

    return data;
}

static common_chat_params common_chat_params_init_magistral(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.prompt = apply(tmpl, inputs);
    data.format = COMMON_CHAT_FORMAT_MAGISTRAL;
    data.preserved_tokens = {
        "[THINK]",
        "[/THINK]",
    };

    if (inputs.tools.is_array() && !inputs.tools.empty()) {
        data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            auto schemas = json::array();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                schemas.push_back({
                    {"type", "object"},
                    {"properties", {
                        {"name", {
                            {"type", "string"},
                            {"const", function.at("name")},
                        }},
                        {"arguments", function.at("parameters")},
                        {"id", {
                            {"type", "string"},
                            {"pattern", "^[a-zA-Z0-9]{9}$"},
                        }},
                    }},
                    {"required", json::array({"name", "arguments", "id"})},
                });
            });
            auto schema = json {
                {"type", "array"},
                {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
                {"minItems", 1},
            };
            if (!inputs.parallel_tool_calls) {
                schema["maxItems"] = 1;
            }
            builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema));
        });
        data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"});
        data.preserved_tokens.push_back("[TOOL_CALLS]");
    } else {
        data.grammar_lazy = false;
        if (!inputs.json_schema.is_null()) {
            if (!inputs.grammar.empty()) {
                throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
            }
            data.grammar = json_schema_to_grammar(inputs.json_schema);
        } else {
            data.grammar = inputs.grammar;
        }
    }

    return data;
}

static common_chat_params common_chat_params_init_command_r7b(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    auto adjusted_messages = json::array();
    for (const auto & msg : inputs.messages) {
        auto has_reasoning_content = msg.contains("reasoning_content") && msg.at("reasoning_content").is_string();
        auto has_tool_calls = msg.contains("tool_calls") && msg.at("tool_calls").is_array();
        if (has_reasoning_content && has_tool_calls) {
            auto adjusted_message = msg;
            adjusted_message["tool_plan"] = msg.at("reasoning_content");
            adjusted_message.erase("reasoning_content");
            adjusted_messages.push_back(adjusted_message);
        } else {
            adjusted_messages.push_back(msg);
        }
    }
    data.prompt = apply(tmpl, inputs, /* messages_override= */ adjusted_messages);
    data.format = COMMON_CHAT_FORMAT_COMMAND_R7B;
    if (string_ends_with(data.prompt, "<|START_THINKING|>")) {
        if (!inputs.enable_thinking) {
            data.prompt += "<|END_THINKING|>";
        } else {
            data.thinking_forced_open = true;
        }
    } else if (!inputs.enable_thinking && string_ends_with(data.prompt, "<|CHATBOT_TOKEN|>")) {
        data.prompt += "<|START_THINKING|><|END_THINKING|>";
    }

    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        auto schemas = json::array();
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            schemas.push_back({
                {"type", "object"},
                {"properties", {
                    {"tool_call_id", {
                        {"type", "string"},
                        // Command-R's template expects an integer string.
                        {"pattern", "^[0-9]{1,10}$"},
                    }},
                    {"tool_name", {
                        {"type", "string"},
                        {"const", function.at("name")},
                    }},
                    {"parameters", function.at("parameters")},
                }},
                {"required", json::array({"tool_call_id", "tool_name", "parameters"})},
            });
        });
        auto schema = json {
            {"type", "array"},
            {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
            {"minItems", 1},
        };
        if (!inputs.parallel_tool_calls) {
            schema["maxItems"] = 1;
        }
        builder.add_rule("root",
            std::string(data.thinking_forced_open ? "( \"<|END_THINKING|>\" space )? " : "") +
            "\"<|START_ACTION|>\" " + builder.add_schema("tool_calls", schema) + " \"<|END_ACTION|>\"");
    });
    data.grammar_triggers.push_back({
        COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
        // If thinking_forced_open, then we capture the </think> tag in the grammar,
        // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
        std::string(data.thinking_forced_open ? "[\\s\\S]*?(<\\|END_THINKING\\|>\\s*)" : "(?:<\\|START_THINKING\\|>[\\s\\S]*?<\\|END_THINKING\\|>\\s*)?") +
        "(<\\|START_ACTION\\|>)[\\s\\S]*"
    });
    data.preserved_tokens = {
        "<|START_ACTION|>",
        "<|END_ACTION|>",
        "<|START_RESPONSE|>",
        "<|END_RESPONSE|>",
        "<|START_THINKING|>",
        "<|END_THINKING|>",
    };
    return data;
}

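// Sanity check for builtin tools: the schema must be an object whose properties are exactly
// the expected ones, and each of them must be listed as required.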
static void expect_tool_parameters(const std::string & name, const json & parameters, const std::vector<std::string> & expected_properties) {
    if (!parameters.is_object() || !parameters.contains("type") || parameters.at("type") != "object" || !parameters.contains("properties") || !parameters.contains("required")) {
        throw std::runtime_error("Parameters of tool " + name + " must be an object w/ required properties");
    }
    const auto & parameters_properties = parameters.at("properties");
    const auto & parameters_required = parameters.at("required");
    for (const auto & prop : expected_properties) {
        if (!parameters_properties.contains(prop)) {
            throw std::runtime_error("Parameters of tool " + name + " is missing property: " + prop); // NOLINT
        }
        if (std::find(parameters_required.begin(), parameters_required.end(), json(prop)) == parameters_required.end()) {
            throw std::runtime_error("Parameters of tool " + name + " must have property marked as required: " + prop); // NOLINT
        }
    }
    if (parameters_properties.size() != expected_properties.size()) {
        throw std::runtime_error("Parameters of tool " + name + " must only have these properties:" + string_join(expected_properties, ", "));
    }
}

  1062. static common_chat_params common_chat_params_init_llama_3_x(const common_chat_template & tmpl, const struct templates_params & inputs, bool allow_python_tag_builtin_tools) {
  1063. auto builtin_tools = json::array();
  1064. common_chat_params data;
  1065. if (!inputs.tools.is_null()) {
  1066. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1067. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1068. std::vector<std::string> tool_rules;
  1069. auto handle_builtin_tool = [&](const std::string & name, const json & parameters) {
  1070. if (name == "wolfram_alpha" || name == "web_search" || name == "brave_search") {
  1071. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py
  1072. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py
  1073. expect_tool_parameters(name, parameters, {"query"});
  1074. } else if (name == "python" || name == "code_interpreter") {
  1075. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py
  1076. expect_tool_parameters(name, parameters, {"code"});
  1077. } else {
  1078. return false;
  1079. }
  1080. std::vector<std::string> kvs;
  1081. for (const auto & [key, value] : parameters.at("properties").items()) {
  1082. kvs.push_back("\"" + key + "=\" " + builder.add_schema(name + "-args-" + key, value)); // NOLINT
  1083. }
  1084. tool_rules.push_back(
  1085. builder.add_rule(
  1086. name + "-call",
  1087. "\"<|python_tag|>" + name + ".call(\" " + string_join(kvs, " \", \" ") + " \")\""));
  1088. builtin_tools.push_back(name);
  1089. return true;
  1090. };
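// Rough sketch of what the <name>-call rule registered by handle_builtin_tool constrains
// (hypothetical query value):
//   <|python_tag|>brave_search.call(query="latest release")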
  1091. foreach_function(inputs.tools, [&](const json & tool) {
  1092. const auto & function = tool.at("function");
  1093. std::string name = function.at("name");
  1094. auto parameters = function.at("parameters");
  1095. builder.resolve_refs(parameters);
  1096. // https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/remote/tool_runtime
  1097. if (allow_python_tag_builtin_tools) {
  1098. handle_builtin_tool(name, parameters);
  1099. }
  1100. tool_rules.push_back(
  1101. builder.add_rule(
  1102. name + "-call",
  1103. "\"{\" space "
  1104. "( \"\\\"type\\\"\" space \":\" space \"\\\"function\\\"\" space \",\" space )? "
  1105. " \"\\\"name\\\"\" space \":\" space \"\\\"" + name + "\\\"\" space \",\" space "
  1106. " \"\\\"parameters\\\"\" space \":\" space " + builder.add_schema(name + "-args", parameters) + " "
  1107. "\"}\" space"));
  1108. });
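// Sketch of the JSON each <name>-call rule above accepts (hypothetical tool and arguments):
//   {"type": "function", "name": "get_weather", "parameters": {"location": "Paris"}}
// where the leading "type": "function" member is optional.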
  1109. // Small models may hallucinate function names so we match anything (*at the start*) that looks like the JSON of a function call, regardless of the name.
  1110. data.grammar_triggers.push_back({
  1111. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1112. "(\\{\\s*(?:\"type\"\\s*:\\s*\"function\"\\s*,\\s*)?\"name\"\\s*:\\s*\")[\\s\\S]*", // + name + "\"[\\s\\S]*",
  1113. });
  1114. if (!builtin_tools.empty()) {
  1115. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
  1116. data.preserved_tokens.push_back("<|python_tag|>");
  1117. }
  1118. // Allow a few empty lines on top of the usual constrained json schema space rule.
  1119. builder.add_rule("root", string_join(tool_rules, " | "));
  1120. data.additional_stops.push_back("<|eom_id|>");
  1121. });
  1122. data.format = allow_python_tag_builtin_tools && !builtin_tools.empty()
  1123. ? COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS
  1124. : COMMON_CHAT_FORMAT_LLAMA_3_X;
  1125. } else {
  1126. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  1127. }
  1128. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, /* tools_override= */ std::nullopt, json {
  1129. {"date_string", format_time(inputs.now, "%d %b %Y")},
  1130. {"tools_in_user_message", false},
  1131. {"builtin_tools", builtin_tools.empty() ? json() : builtin_tools},
  1132. });
  1133. return data;
  1134. }
  1135. static common_chat_params common_chat_params_init_nemotron_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1136. common_chat_params data;
  1137. // Generate the prompt using the apply() function with the template
  1138. data.prompt = apply(tmpl, inputs);
  1139. data.format = COMMON_CHAT_FORMAT_NEMOTRON_V2;
  1140. // Handle thinking tags appropriately based on inputs.enable_thinking
  1141. if (string_ends_with(data.prompt, "<think>\n")) {
  1142. if (!inputs.enable_thinking) {
  1143. data.prompt += "</think>";
  1144. } else {
  1145. data.thinking_forced_open = true;
  1146. }
  1147. }
  1148. // When tools are present, build grammar for the <TOOLCALL> format, similar to CommandR, but without tool call ID
  1149. if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
  1150. data.grammar_lazy = true;
  1151. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1152. auto schemas = json::array();
  1153. foreach_function(inputs.tools, [&](const json & tool) {
  1154. const auto & function = tool.at("function");
  1155. schemas.push_back({
  1156. { "type", "object" },
  1157. { "properties",
  1158. {
  1159. { "name",
  1160. {
  1161. { "type", "string" },
  1162. { "const", function.at("name") },
  1163. } },
  1164. { "arguments", function.at("parameters") },
  1165. } },
  1166. { "required", json::array({ "name", "arguments" }) },
  1167. });
  1168. });
  1169. auto schema = json{
  1170. { "type", "array" },
  1171. { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
  1172. { "minItems", 1 },
  1173. };
  1174. if (!inputs.parallel_tool_calls) {
  1175. schema["maxItems"] = 1;
  1176. }
  1177. builder.add_rule("root",
  1178. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1179. "\"<TOOLCALL>\" " + builder.add_schema("tool_calls", schema) +
  1180. " \"</TOOLCALL>\"");
  1181. });
  1182. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1183. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1184. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1185. std::string(data.thinking_forced_open ?
  1186. "[\\s\\S]*?(</think>\\s*)" :
  1187. "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1188. "(<TOOLCALL>)[\\s\\S]*" });
  1189. }
  1190. return data;
  1191. }
  1192. static common_chat_params common_chat_params_init_apertus(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1193. common_chat_params data;
  1194. // Generate the prompt using the apply() function with the template
  1195. data.prompt = apply(tmpl, inputs);
  1196. data.format = COMMON_CHAT_FORMAT_APERTUS;
  1197. // Handle thinking tags appropriately based on inputs.enable_thinking
  1198. if (string_ends_with(data.prompt, "<|inner_prefix|>")) {
  1199. if (!inputs.enable_thinking) {
  1200. data.prompt += "<|inner_suffix|>";
  1201. } else {
  1202. data.thinking_forced_open = true;
  1203. }
  1204. }
  1205. // When tools are present, build grammar for the <|tools_prefix|> format
  1206. if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
  1207. data.grammar_lazy = true;
  1208. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1209. auto schemas = json::array();
  1210. foreach_function(inputs.tools, [&](const json & tool) {
  1211. const auto & function = tool.at("function");
  1212. schemas.push_back({
  1213. { "type", "object" },
  1214. { "properties",
  1215. {
  1216. { function.at("name"), function.at("parameters") }
  1217. } },
  1218. { "required", json::array({ function.at("name") }) },
  1219. });
  1220. });
  1221. auto schema = json{
  1222. { "type", "array" },
  1223. { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
  1224. { "minItems", 1 },
  1225. };
  1226. if (!inputs.parallel_tool_calls) {
  1227. schema["maxItems"] = 1;
  1228. }
  1229. builder.add_rule("root",
  1230. std::string(data.thinking_forced_open ? "( \"<|inner_suffix|>\" space )? " : "") +
  1231. "\"<|tools_prefix|>\"" + builder.add_schema("tool_calls", schema) + "\"<|tools_suffix|>\"");
  1232. });
  1233. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1234. // If thinking_forced_open, then we capture the <|inner_suffix|> tag in the grammar,
  1235. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1236. std::string(data.thinking_forced_open ?
  1237. "[\\s\\S]*?(<\\|inner_suffix\\|>\\s*)" :
  1238. "(?:<\\|inner_prefix\\|>[\\s\\S]*?<\\|inner_suffix\\|>\\s*)?") +
  1239. "(<\\|tools_prefix\\|>)[\\s\\S]*" });
  1240. data.preserved_tokens = {
  1241. "<|system_start|>",
  1242. "<|system_end|>",
  1243. "<|developer_start|>",
  1244. "<|developer_end|>",
  1245. "<|user_start|>",
  1246. "<|user_end|>",
  1247. "<|assistant_start|>",
  1248. "<|assistant_end|>",
  1249. "<|inner_prefix|>",
  1250. "<|inner_suffix|>",
  1251. "<|tools_prefix|>",
  1252. "<|tools_suffix|>",
  1253. };
  1254. }
  1255. return data;
  1256. }
  1257. static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1258. common_chat_params data;
  1259. auto prompt = apply(tmpl, inputs);
  1260. // Hacks to fix the official (broken) prompt.
  1261. // It is advisable to use --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja instead,
  1262. // until the official template is fixed.
1263. if (tmpl.source().find("{% if ns.is_tool %}{{'<｜tool▁outputs▁end｜>'}}") != std::string::npos) {
  1264. // Don't leave the chat dangling after tool results
  1265. if (string_ends_with(prompt, "<|tool▁outputs▁end|>")) {
  1266. prompt += "<|end▁of▁sentence|>";
  1267. if (inputs.add_generation_prompt) {
  1268. prompt += "<|Assistant|>";
  1269. }
  1270. }
  1271. // Fix up tool call delta example added by Minja
  1272. prompt = std::regex_replace(
  1273. prompt,
1274. std::regex("(<｜tool▁call▁end｜>)[\\s\\r\\n]*(<｜tool▁outputs▁begin｜>|<｜User｜>)"),
1275. "$1<｜tool▁calls▁end｜><｜end▁of▁sentence｜>$2");
  1276. }
  1277. data.prompt = prompt;
  1278. data.format = COMMON_CHAT_FORMAT_DEEPSEEK_R1;
  1279. if (string_ends_with(data.prompt, "<think>\n")) {
  1280. if (!inputs.enable_thinking) {
  1281. data.prompt += "</think>";
  1282. } else {
  1283. data.thinking_forced_open = true;
  1284. }
  1285. }
  1286. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1287. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
  1288. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1289. std::vector<std::string> tool_rules;
  1290. foreach_function(inputs.tools, [&](const json & tool) {
  1291. const auto & function = tool.at("function");
  1292. std::string name = function.at("name");
  1293. auto parameters = function.at("parameters");
  1294. builder.resolve_refs(parameters);
  1295. tool_rules.push_back(builder.add_rule(name + "-call",
  1296. "( \"<|tool▁call▁begin|>\" )? \"function<|tool▁sep|>" + name + "\\n"
  1297. "```json\\n\" " + builder.add_schema(name + "-args", parameters) + " "
  1298. "\"```<|tool▁call▁end|>\""));
  1299. });
  1300. // Distill Qwen 7B & 32B models seem confused re/ syntax of their tool call opening tag,
  1301. // so we accept common variants (then it's all constrained)
  1302. builder.add_rule("root",
  1303. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1304. "( \"<|tool▁calls▁begin|>\" | \"<|tool_calls_begin|>\" | \"<|tool calls begin|>\" | \"<|tool\\\\_calls\\\\_begin|>\" | \"<|tool▁calls|>\" ) "
  1305. "(" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " "
  1306. "\"<|tool▁calls▁end|>\""
  1307. " space");
  1308. data.grammar_triggers.push_back({
  1309. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1310. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1311. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1312. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1313. "(<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>|<|tool▁calls|>)[\\s\\S]*"
  1314. });
  1315. data.preserved_tokens = {
  1316. "<think>",
  1317. "</think>",
  1318. "<|tool▁calls▁begin|>",
  1319. "<|tool▁call▁begin|>",
  1320. "<|tool▁sep|>",
  1321. "<|tool▁call▁end|>",
  1322. "<|tool▁calls▁end|",
  1323. };
  1324. });
  1325. }
  1326. return data;
  1327. }
  1328. static common_chat_params common_chat_params_init_deepseek_v3_1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1329. common_chat_params data;
  1330. // Pass thinking context for DeepSeek V3.1 template
  1331. json additional_context = {
  1332. {"thinking", inputs.enable_thinking},
  1333. };
  1334. auto prompt = apply(tmpl, inputs,
  1335. /* messages_override= */ inputs.messages,
  1336. /* tools_override= */ std::nullopt,
  1337. additional_context);
  1338. data.prompt = prompt;
  1339. data.format = COMMON_CHAT_FORMAT_DEEPSEEK_V3_1;
  1340. if (string_ends_with(data.prompt, "<think>")) {
  1341. if (!inputs.enable_thinking) {
  1342. data.prompt += "</think>";
  1343. } else {
  1344. data.thinking_forced_open = true;
  1345. }
  1346. }
  1347. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1348. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
  1349. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1350. std::vector<std::string> tool_rules;
  1351. foreach_function(inputs.tools, [&](const json & tool) {
  1352. const auto & function = tool.at("function");
  1353. std::string name = function.at("name");
  1354. auto parameters = function.at("parameters");
  1355. builder.resolve_refs(parameters);
  1356. tool_rules.push_back(builder.add_rule(name + "-call",
  1357. "( \"<|tool▁call▁begin|>\" )? \"" + name + "<|tool▁sep|>"
  1358. "\" " + builder.add_schema(name + "-args", parameters) + " "
  1359. "\"<|tool▁call▁end|>\""));
  1360. });
  1361. // Distill Qwen 7B & 32B models seem confused re/ syntax of their tool call opening tag,
  1362. // so we accept common variants (then it's all constrained)
  1363. builder.add_rule("root",
  1364. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1365. "( \"<|tool▁calls▁begin|>\" | \"<|tool_calls_begin|>\" | \"<|tool calls begin|>\" | \"<|tool\\\\_calls\\\\_begin|>\" | \"<|tool▁calls|>\" ) "
  1366. "(" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " "
  1367. "\"<|tool▁calls▁end|>\""
  1368. " space");
  1369. data.grammar_triggers.push_back({
  1370. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1371. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1372. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1373. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1374. "(<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>|<|tool▁calls|>)[\\s\\S]*"
  1375. });
  1376. data.preserved_tokens = {
  1377. "<think>",
  1378. "</think>",
  1379. "<|tool▁calls▁begin|>",
  1380. "<|tool▁call▁begin|>",
  1381. "<|tool▁sep|>",
  1382. "<|tool▁call▁end|>",
  1383. "<|tool▁calls▁end|>",
  1384. };
  1385. });
  1386. }
  1387. return data;
  1388. }
  1389. static common_chat_params common_chat_params_init_minimax_m2(const common_chat_template & tmpl, const struct templates_params & params) {
  1390. common_chat_params data;
  1391. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1392. data.prompt = apply(tmpl, params);
  1393. data.format = COMMON_CHAT_FORMAT_MINIMAX_M2;
  1394. // Handle thinking tags based on prompt ending
  1395. if (string_ends_with(data.prompt, "<think>\n")) {
  1396. if (!params.enable_thinking) {
  1397. // Close the thinking tag immediately if thinking is disabled
  1398. data.prompt += "</think>\n\n";
  1399. } else {
  1400. // Mark thinking as forced open (template started with <think>)
  1401. data.thinking_forced_open = true;
  1402. }
  1403. }
  1404. // Preserve MiniMax-M2 special tokens
  1405. data.preserved_tokens = {
  1406. "<think>",
  1407. "</think>",
  1408. "<minimax:tool_call>",
  1409. "</minimax:tool_call>",
  1410. };
  1411. // build grammar for tool call
  1412. static const xml_tool_call_format form {
  1413. /* form.scope_start = */ "<minimax:tool_call>\n",
  1414. /* form.tool_start = */ "<invoke name=\"",
  1415. /* form.tool_sep = */ "\">\n",
  1416. /* form.key_start = */ "<parameter name=\"",
  1417. /* form.key_val_sep = */ "\">",
  1418. /* form.val_end = */ "</parameter>\n",
  1419. /* form.tool_end = */ "</invoke>\n",
  1420. /* form.scope_end = */ "</minimax:tool_call>",
  1421. };
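// With the separators above, a (hypothetical) tool call is rendered roughly as:
//   <minimax:tool_call>
//   <invoke name="get_weather">
//   <parameter name="location">Paris</parameter>
//   </invoke>
//   </minimax:tool_call>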
  1422. build_grammar_xml_tool_call(data, params.tools, form);
  1423. return data;
  1424. }
  1425. static common_chat_params common_chat_params_init_qwen3_coder_xml(const common_chat_template & tmpl, const struct templates_params & params) {
  1426. common_chat_params data;
  1427. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1428. data.prompt = apply(tmpl, params);
  1429. data.format = COMMON_CHAT_FORMAT_QWEN3_CODER_XML;
  1430. data.preserved_tokens = {
  1431. "<tool_call>",
  1432. "</tool_call>",
  1433. "<function=",
  1434. "</function>",
  1435. "<parameter=",
  1436. "</parameter>",
  1437. };
  1438. // build grammar for tool call
  1439. static const xml_tool_call_format form {
  1440. /* form.scope_start = */ "<tool_call>\n",
  1441. /* form.tool_start = */ "<function=",
  1442. /* form.tool_sep = */ ">\n",
  1443. /* form.key_start = */ "<parameter=",
  1444. /* form.key_val_sep = */ ">\n",
  1445. /* form.val_end = */ "\n</parameter>\n",
  1446. /* form.tool_end = */ "</function>\n",
  1447. /* form.scope_end = */ "</tool_call>",
  1448. };
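// With the separators above, a (hypothetical) tool call is rendered roughly as:
//   <tool_call>
//   <function=get_weather>
//   <parameter=location>
//   Paris
//   </parameter>
//   </function>
//   </tool_call>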
  1449. build_grammar_xml_tool_call(data, params.tools, form);
  1450. return data;
  1451. }
  1452. static common_chat_params common_chat_params_init_kimi_k2(const common_chat_template & tmpl, const struct templates_params & params) {
  1453. common_chat_params data;
  1454. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1455. data.prompt = apply(tmpl, params);
  1456. data.format = COMMON_CHAT_FORMAT_KIMI_K2;
  1457. data.preserved_tokens = {
  1458. "<think>",
  1459. "</think>",
  1460. "<|tool_calls_section_begin|>",
  1461. "<|tool_call_begin|>",
  1462. "<|tool_call_argument_begin|>",
  1463. "<|tool_call_end|>",
  1464. "<|tool_calls_section_end|>",
  1465. "<|im_end|>",
  1466. "<|im_system|>",
  1467. "<|im_middle|>",
  1468. };
  1469. data.additional_stops.insert(data.additional_stops.end(), {
  1470. "<|im_end|>",
  1471. "<|im_middle|>"
  1472. });
  1473. // build grammar for tool call
  1474. static const xml_tool_call_format form = ([]() {
  1475. xml_tool_call_format form {};
  1476. form.scope_start = "<|tool_calls_section_begin|>";
  1477. form.tool_start = "<|tool_call_begin|>";
  1478. form.tool_sep = "<|tool_call_argument_begin|>{";
  1479. form.key_start = "\"";
  1480. form.key_val_sep = "\": ";
  1481. form.val_end = ", ";
  1482. form.tool_end = "}<|tool_call_end|>";
  1483. form.scope_end = "<|tool_calls_section_end|>";
  1484. form.raw_argval = false;
  1485. form.last_val_end = "";
  1486. return form;
  1487. })();
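// With the separators above, a (hypothetical) call is rendered roughly as follows; argument values
// are JSON-encoded since raw_argval is false, and the exact tool-name encoding is whatever
// build_grammar_xml_tool_call emits between tool_start and tool_sep:
//   <|tool_calls_section_begin|><|tool_call_begin|>get_weather<|tool_call_argument_begin|>{"location": "Paris"}<|tool_call_end|><|tool_calls_section_end|>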
  1488. build_grammar_xml_tool_call(data, params.tools, form);
  1489. return data;
  1490. }
  1491. static common_chat_params common_chat_params_init_apriel_1_5(const common_chat_template & tmpl, const struct templates_params & params) {
  1492. common_chat_params data;
  1493. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1494. data.prompt = apply(tmpl, params);
  1495. data.format = COMMON_CHAT_FORMAT_APRIEL_1_5;
  1496. data.preserved_tokens = {
  1497. "<thinking>",
  1498. "</thinking>",
  1499. "<tool_calls>",
  1500. "</tool_calls>",
  1501. };
  1502. // build grammar for tool call
  1503. static const xml_tool_call_format form = ([]() {
  1504. xml_tool_call_format form {};
  1505. form.scope_start = "<tool_calls>[";
  1506. form.tool_start = "{\"name\": \"";
  1507. form.tool_sep = "\", \"arguments\": {";
  1508. form.key_start = "\"";
  1509. form.key_val_sep = "\": ";
  1510. form.val_end = ", ";
  1511. form.tool_end = "}, ";
  1512. form.scope_end = "]</tool_calls>";
  1513. form.raw_argval = false;
  1514. form.last_val_end = "";
  1515. form.last_tool_end = "}";
  1516. return form;
  1517. })();
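// With the separators above, a (hypothetical) tool call is rendered roughly as
// (last_val_end/last_tool_end drop the trailing ", " separators):
//   <tool_calls>[{"name": "get_weather", "arguments": {"location": "Paris"}}]</tool_calls>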
  1518. build_grammar_xml_tool_call(data, params.tools, form);
  1519. return data;
  1520. }
  1521. static common_chat_params common_chat_params_init_xiaomi_mimo(const common_chat_template & tmpl, const struct templates_params & params) {
  1522. common_chat_params data;
  1523. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1524. data.prompt = apply(tmpl, params);
  1525. data.format = COMMON_CHAT_FORMAT_XIAOMI_MIMO;
  1526. data.preserved_tokens = {
  1527. "<tool_call>",
  1528. "</tool_call>",
  1529. };
  1530. // build grammar for tool call
  1531. static const xml_tool_call_format form = ([]() {
  1532. xml_tool_call_format form {};
  1533. form.scope_start = "\n";
  1534. form.tool_start = "<tool_call>\n{\"name\": \"";
  1535. form.tool_sep = "\", \"arguments\": {";
  1536. form.key_start = "\"";
  1537. form.key_val_sep = "\": ";
  1538. form.val_end = ", ";
  1539. form.tool_end = "}\n</tool_call>";
  1540. form.scope_end = "";
  1541. form.raw_argval = false;
  1542. form.last_val_end = "";
  1543. return form;
  1544. })();
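// With the separators above, a (hypothetical) tool call is rendered roughly as:
//   <tool_call>
//   {"name": "get_weather", "arguments": {"location": "Paris"}}
//   </tool_call>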
  1545. build_grammar_xml_tool_call(data, params.tools, form);
  1546. return data;
  1547. }
  1548. static common_chat_params common_chat_params_init_gpt_oss(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1549. common_chat_params data;
  1550. // Copy reasoning to the "thinking" field as expected by the gpt-oss template
  1551. auto adjusted_messages = json::array();
  1552. for (const auto & msg : inputs.messages) {
  1553. auto has_reasoning_content = msg.contains("reasoning_content") && msg.at("reasoning_content").is_string();
  1554. auto has_tool_calls = msg.contains("tool_calls") && msg.at("tool_calls").is_array();
  1555. if (has_reasoning_content && has_tool_calls) {
  1556. auto adjusted_message = msg;
  1557. adjusted_message["thinking"] = msg.at("reasoning_content");
  1558. adjusted_messages.push_back(adjusted_message);
  1559. } else {
  1560. adjusted_messages.push_back(msg);
  1561. }
  1562. }
  1563. auto prompt = apply(tmpl, inputs, /* messages_override= */ adjusted_messages);
  1564. // Check if we need to replace the return token with end token during
  1565. // inference and without generation prompt. For more details see:
  1566. // https://github.com/ggml-org/llama.cpp/issues/15417
  1567. if (inputs.is_inference && !inputs.add_generation_prompt) {
  1568. static constexpr std::string_view return_token = "<|return|>";
  1569. static constexpr std::string_view end_token = "<|end|>";
  1570. if (size_t pos = prompt.rfind(return_token); pos != std::string::npos) {
  1571. prompt.replace(pos, return_token.length(), end_token);
  1572. }
  1573. }
  1574. data.prompt = prompt;
  1575. data.format = COMMON_CHAT_FORMAT_GPT_OSS;
  1576. // These special tokens are required to parse properly, so we include them
  1577. // even if parse_tool_calls is false.
  1578. data.preserved_tokens = {
  1579. "<|channel|>",
  1580. "<|constrain|>",
  1581. "<|message|>",
  1582. "<|start|>",
  1583. "<|end|>",
  1584. };
  1585. if (!inputs.json_schema.is_null()) {
  1586. data.grammar_lazy = false;
  1587. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1588. auto schema = inputs.json_schema;
  1589. builder.resolve_refs(schema);
  1590. auto not_end = builder.add_rule("not-end",
  1591. "[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
  1592. auto analysis = builder.add_rule("analysis",
  1593. "\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1594. auto constraint = builder.add_rule("constraint", "\"<|constrain|>\"? [a-zA-Z0-9_-]+");
  1595. auto final = builder.add_rule("final",
  1596. "\"<|channel|>final\" ( \" \" " + constraint + " )? \"<|message|>\" " +
  1597. builder.add_schema("response", schema)
  1598. );
  1599. builder.add_rule("root", "( " + analysis + " \"<|start|>assistant\" )? " + final);
  1600. });
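// Roughly, the grammar above accepts an optional analysis channel followed by a final channel whose
// message must match the response schema, e.g. (hypothetical schema-conforming output):
//   <|channel|>analysis<|message|>...<|end|><|start|>assistant<|channel|>final<|message|>{"answer": 42}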
  1601. }
  1602. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1603. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1604. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1605. // tool calls can appear in commentary or analysis channels
  1606. auto channel = builder.add_rule("channel", "\"<|channel|>\" ( \"commentary\" | \"analysis\" )");
  1607. std::vector<std::string> tool_rules_recipient_in_role;
  1608. std::vector<std::string> tool_rules_recipient_in_channel;
  1609. foreach_function(inputs.tools, [&](const json & tool) {
  1610. const auto & function = tool.at("function");
  1611. std::string name = function.at("name");
  1612. auto parameters = function.at("parameters");
  1613. builder.resolve_refs(parameters);
  1614. tool_rules_recipient_in_role.push_back(
  1615. builder.add_rule(name + "-call",
  1616. "\"" + name + "\"" + channel + " \" <|constrain|>json\"? \"<|message|>\" " +
  1617. builder.add_schema(name + "-args", parameters)
  1618. )
  1619. );
  1620. tool_rules_recipient_in_channel.push_back(
  1621. builder.add_rule(name + "-call",
  1622. "\"" + name + "\"" + " \" <|constrain|>json\"? \"<|message|>\" " +
  1623. builder.add_schema(name + "-args", parameters)
  1624. )
  1625. );
  1626. });
  1627. auto recipient_in_channel = builder.add_rule("recipient_in_channel",
  1628. channel + " \" to=functions.\" ( " +
  1629. string_join(tool_rules_recipient_in_channel, " | ") + " )"
  1630. );
  1631. if (data.grammar_lazy) {
  1632. auto recipient_in_role = builder.add_rule("recipient_in_role",
  1633. "\"<|start|>assistant\"? \" to=functions.\" ( " +
  1634. string_join(tool_rules_recipient_in_role, " | ") + " )"
  1635. );
  1636. builder.add_rule("root", recipient_in_role + " | " + recipient_in_channel);
  1637. } else {
  1638. auto not_end = builder.add_rule("not-end",
  1639. "[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
  1640. auto analysis = builder.add_rule("analysis",
  1641. "\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1642. auto commentary = builder.add_rule("commentary",
  1643. "\"<|channel|>commentary<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1644. auto recipient_in_role = builder.add_rule("recipient_in_role",
  1645. "\" to=functions.\" ( " + string_join(tool_rules_recipient_in_role, " | ") + " )"
  1646. );
  1647. builder.add_rule("root",
  1648. "( " + analysis + " \"<|start|>assistant\" )? " +
  1649. "( " + commentary + " \"<|start|>assistant\" )? " +
  1650. "( " + recipient_in_role + " | " + recipient_in_channel + " )"
  1651. );
  1652. }
  1653. // Trigger on tool calls that appear in the commentary channel
  1654. data.grammar_triggers.push_back({
  1655. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  1656. "<\\|channel\\|>(commentary|analysis) to"
  1657. });
1658. // Trigger on tool calls that appear in the role section, either at the
1659. // start or in the middle.
  1660. data.grammar_triggers.push_back({
  1661. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1662. "^ to"
  1663. });
  1664. data.grammar_triggers.push_back({
  1665. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  1666. "<\\|start\\|>assistant to"
  1667. });
  1668. });
  1669. }
  1670. return data;
  1671. }
  1672. static common_chat_params common_chat_params_init_glm_4_5(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1673. common_chat_params data;
  1674. data.grammar_lazy = inputs.tools.is_array() && !inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1675. std::string prompt = apply(tmpl, inputs);
  1676. // match the existing trimming behavior
  1677. if (inputs.add_bos && string_starts_with(prompt, tmpl.bos_token())) {
  1678. prompt.erase(0, tmpl.bos_token().size());
  1679. }
  1680. if (inputs.add_eos && string_ends_with(prompt, tmpl.eos_token())) {
  1681. prompt.erase(prompt.size() - tmpl.eos_token().size());
  1682. }
  1683. if (string_ends_with(prompt, "<think>")) {
  1684. if (!inputs.enable_thinking) {
  1685. prompt += "</think>";
  1686. } else {
  1687. data.thinking_forced_open = true;
  1688. }
  1689. }
  1690. // add GLM preserved tokens
  1691. data.preserved_tokens = {
  1692. "<|endoftext|>",
  1693. "[MASK]",
  1694. "[gMASK]",
  1695. "[sMASK]",
  1696. "<sop>",
  1697. "<eop>",
  1698. "<|system|>",
  1699. "<|user|>",
  1700. "<|assistant|>",
  1701. "<|observation|>",
  1702. "<|begin_of_image|>",
  1703. "<|end_of_image|>",
  1704. "<|begin_of_video|>",
  1705. "<|end_of_video|>",
  1706. "<|begin_of_audio|>",
  1707. "<|end_of_audio|>",
  1708. "<|begin_of_transcription|>",
  1709. "<|end_of_transcription|>",
  1710. "<|code_prefix|>",
  1711. "<|code_middle|>",
  1712. "<|code_suffix|>",
  1713. "/nothink",
  1714. "<think>",
  1715. "</think>",
  1716. "<tool_call>",
  1717. "</tool_call>",
  1718. "<arg_key>",
  1719. "</arg_key>",
  1720. "<arg_value>",
  1721. "</arg_value>"
  1722. };
  1723. // extra GLM 4.5 stop word
  1724. data.additional_stops.insert(data.additional_stops.end(), {
  1725. "<|user|>",
  1726. "<|observation|>"
  1727. });
  1728. // build grammar for tool call
  1729. static const xml_tool_call_format form {
  1730. /* form.scope_start = */ "",
  1731. /* form.tool_start = */ "\n<tool_call>",
  1732. /* form.tool_sep = */ "\n",
  1733. /* form.key_start = */ "<arg_key>",
  1734. /* form.key_val_sep = */ "</arg_key>\n<arg_value>",
  1735. /* form.val_end = */ "</arg_value>\n",
  1736. /* form.tool_end = */ "</tool_call>\n",
  1737. /* form.scope_end = */ "",
  1738. };
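// With the separators above, a (hypothetical) tool call is rendered roughly as:
//   <tool_call>get_weather
//   <arg_key>location</arg_key>
//   <arg_value>Paris</arg_value>
//   </tool_call>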
  1739. build_grammar_xml_tool_call(data, inputs.tools, form);
  1740. data.prompt = prompt;
  1741. data.format = COMMON_CHAT_FORMAT_GLM_4_5;
  1742. return data;
  1743. }
  1744. static common_chat_params common_chat_params_init_firefunction_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1745. LOG_DBG("%s\n", __func__);
  1746. common_chat_params data;
  1747. const std::optional<json> tools_override = json();
  1748. const std::optional<json> additional_context = json {
  1749. {"datetime", format_time(inputs.now, "%b %d %Y %H:%M:%S GMT")},
  1750. {"functions", json(inputs.tools.empty() ? "" : inputs.tools.dump(2))},
  1751. };
  1752. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, tools_override, additional_context);
  1753. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1754. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1755. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1756. auto schemas = json::array();
  1757. foreach_function(inputs.tools, [&](const json & tool) {
  1758. const auto & function = tool.at("function");
  1759. schemas.push_back({
  1760. {"type", "object"},
  1761. {"properties", {
  1762. {"name", {
  1763. {"type", "string"},
  1764. {"const", function.at("name")},
  1765. }},
  1766. {"arguments", function.at("parameters")},
  1767. }},
  1768. {"required", json::array({"name", "arguments", "id"})},
  1769. });
  1770. });
  1771. auto schema = json {
  1772. {"type", "array"},
  1773. {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
  1774. {"minItems", 1},
  1775. };
  1776. if (!inputs.parallel_tool_calls) {
  1777. schema["maxItems"] = 1;
  1778. }
  1779. builder.add_rule("root", "\" functools\"? " + builder.add_schema("tool_calls", schema));
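// Roughly constrains output like (hypothetical tool call; the leading " functools" is optional in the
// grammar, though the trigger word below requires it):
//    functools[{"name": "get_weather", "arguments": {"location": "Paris"}}]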
  1780. });
  1781. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, " functools["});
  1782. data.preserved_tokens = {
  1783. " functools[",
  1784. };
  1785. data.format = COMMON_CHAT_FORMAT_FIREFUNCTION_V2;
  1786. } else {
  1787. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  1788. }
  1789. return data;
  1790. }
  1791. static common_chat_params common_chat_params_init_functionary_v3_2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1792. // >>>all\nlet's call functions>>>fn1\n{"arg1": 1...}\n>>>fn2\n{"arg1": 1...}...
  1793. // Using ">>>f1\n", ">>>f2\n"... as trigger words for the grammar
  1794. // If the function is python, we also allow raw python code (if the line after `python\n` doesn't start w/ opening `{`), which the model seems to prefer for multiline code.
  1795. common_chat_params data;
  1796. data.prompt = apply(tmpl, inputs);
  1797. data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2;
  1798. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1799. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1800. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1801. std::vector<std::string> first_tool_rules;
  1802. std::vector<std::string> subsequent_tool_rules;
  1803. foreach_function(inputs.tools, [&](const json & tool) {
  1804. const auto & function = tool.at("function");
  1805. std::string name = function.at("name");
  1806. auto parameters = function.at("parameters");
  1807. builder.resolve_refs(parameters);
  1808. std::string args_pattern = "[\\s\\S]*";
  1809. auto args_rule = builder.add_schema(name + "-args", parameters);
  1810. if (name == "python") {
  1811. args_rule = builder.add_rule(name + "-maybe-raw-args", args_rule + " | [^{] .*");
  1812. } else {
  1813. args_pattern = "\\{" + args_pattern;
  1814. }
  1815. auto call_rule = builder.add_rule(name + "-call", "\"" + name + "\\n\" " + args_rule);
  1816. first_tool_rules.push_back(call_rule);
  1817. if (inputs.parallel_tool_calls) {
  1818. subsequent_tool_rules.push_back(builder.add_rule(name + "-call2", "\">>>\" " + call_rule));
  1819. }
  1820. data.grammar_triggers.push_back({
  1821. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1822. "((?:[\\s\\S]+?>>>)?" + regex_escape(name) + "\n)" + args_pattern,
  1823. });
  1824. });
  1825. data.preserved_tokens = {
  1826. "<|end_header_id|>",
  1827. };
  1828. auto first_rule = first_tool_rules.empty() ? "" : builder.add_rule("first_tool_call", string_join(first_tool_rules, " | ")) + " space";
  1829. if (inputs.parallel_tool_calls) {
  1830. auto subsequent_rule = builder.add_rule("subsequent_tool_call", string_join(subsequent_tool_rules, " | ")) + " space";
  1831. builder.add_rule("root", first_rule + " (" + subsequent_rule + ")*");
  1832. } else {
  1833. builder.add_rule("root", first_rule);
  1834. }
  1835. });
  1836. }
  1837. return data;
  1838. }
  1839. static common_chat_params common_chat_params_init_functionary_v3_1_llama_3_1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1840. // https://github.com/MeetKai/functionary/blob/main/tests/prompt_test_v3-llama3.1.txt
  1841. common_chat_params data;
  1842. if (!inputs.tools.is_null()) {
  1843. std::string python_code_argument_name;
  1844. auto has_raw_python = false;
  1845. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1846. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1847. std::vector<std::string> tool_rules;
  1848. foreach_function(inputs.tools, [&](const json & tool) {
  1849. const auto & function = tool.at("function");
  1850. const auto & parameters = function.at("parameters");
  1851. std::string name = function.at("name");
  1852. if (name == "python" || name == "ipython") {
  1853. if (!parameters.contains("type")) {
  1854. throw std::runtime_error("Missing type in python tool");
  1855. }
  1856. has_raw_python = true;
  1857. const auto & type = parameters.at("type");
  1858. if (type == "object") {
  1859. auto properties = parameters.at("properties");
  1860. for (auto it = properties.begin(); it != properties.end(); ++it) {
  1861. if (it.value().at("type") == "string") {
  1862. if (!python_code_argument_name.empty()) {
  1863. throw std::runtime_error("Multiple string arguments found in python tool");
  1864. }
  1865. python_code_argument_name = it.key();
  1866. }
  1867. }
  1868. if (python_code_argument_name.empty()) {
  1869. throw std::runtime_error("No string argument found in python tool");
  1870. }
  1871. } else if (type != "string") {
  1872. throw std::runtime_error("Invalid type in python tool: " + type.dump());
  1873. }
  1874. }
  1875. tool_rules.push_back(builder.add_rule(name + "-call", "\"<function=" + name + ">\" " + builder.add_schema(name + "-args", parameters) + " \"</function>\" space"));
  1876. });
  1877. if (has_raw_python) {
  1878. tool_rules.push_back(builder.add_rule("python-call", "\"<|python_tag|>\" .*"));
  1879. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
  1880. data.preserved_tokens.push_back("<|python_tag|>");
  1881. }
  1882. auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | ")) + " space";
  1883. builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call);
  1884. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<function="});
  1885. });
  1886. data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1;
  1887. } else {
  1888. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  1889. }
  1890. data.prompt = apply(tmpl, inputs);
  1891. // TODO: if (has_raw_python)
  1892. return data;
  1893. }
  1894. static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1895. common_chat_params data;
  1896. json extra_context = json {
  1897. {"enable_thinking", inputs.enable_thinking},
  1898. };
  1899. extra_context.update(inputs.extra_context);
  1900. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, /* tools_override= */ std::nullopt, extra_context);
  1901. data.format = COMMON_CHAT_FORMAT_HERMES_2_PRO;
  1902. if (string_ends_with(data.prompt, "<think>\n")) {
  1903. if (!extra_context["enable_thinking"]) {
  1904. data.prompt += "</think>";
  1905. } else {
  1906. data.thinking_forced_open = true;
  1907. }
  1908. }
  1909. if (!inputs.tools.is_null()) {
  1910. // (content)?(<tool_call>{"name": "foo", "arguments": {"a": 1}}</tool_call>)*
  1911. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1912. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1913. std::vector<std::string> tool_rules;
  1914. std::vector<std::string> tool_call_alts;
  1915. std::vector<std::string> escaped_names;
  1916. foreach_function(inputs.tools, [&](const json & tool) {
  1917. const auto & function = tool.at("function");
  1918. std::string name = function.at("name");
  1919. auto parameters = function.at("parameters");
  1920. builder.resolve_refs(parameters);
  1921. tool_rules.push_back(builder.add_schema(name + "-call", {
  1922. {"type", "object"},
  1923. {"properties", json {
  1924. {"name", json {{"const", name}}},
  1925. {"arguments", parameters},
  1926. }},
  1927. {"required", json::array({"name", "arguments"})},
  1928. }));
  1929. tool_call_alts.push_back(builder.add_rule(
  1930. name + "-function-tag",
  1931. "\"<function\" ( \"=" + name + "\" | \" name=\\\"" + name + "\\\"\" ) \">\" space " +
  1932. builder.add_schema(name + "-args", parameters) + " "
  1933. "\"</function>\" space"));
  1934. data.grammar_triggers.push_back({
  1935. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  1936. "<function=" + name + ">",
  1937. });
  1938. auto escaped_name = regex_escape(name);
  1939. data.grammar_triggers.push_back({
  1940. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  1941. "<function\\s+name\\s*=\\s*\"" + escaped_name + "\"",
  1942. });
  1943. escaped_names.push_back(escaped_name);
  1944. });
  1945. auto any_tool_call = builder.add_rule("any_tool_call", "( " + string_join(tool_rules, " | ") + " ) space");
  1946. std::vector<std::string> alt_tags {
  1947. any_tool_call,
  1948. "\"<tool_call>\" space " + any_tool_call + " \"</tool_call>\"",
  1949. // The rest is just to accommodate common "good bad" outputs.
  1950. "\"<function_call>\" space " + any_tool_call + " \"</function_call>\"",
  1951. "\"<response>\" space " + any_tool_call + " \"</response>\"",
  1952. "\"<tools>\" space " + any_tool_call + " \"</tools>\"",
  1953. "\"<json>\" space " + any_tool_call + " \"</json>\"",
  1954. "\"<xml>\" space " + any_tool_call + " \"</xml>\"",
  1955. "\"<JSON>\" space " + any_tool_call + " \"</JSON>\"",
  1956. };
  1957. auto wrappable_tool_call = builder.add_rule("wrappable_tool_call", "( " + string_join(alt_tags, " | ") + " ) space");
  1958. tool_call_alts.push_back(wrappable_tool_call);
  1959. tool_call_alts.push_back(
  1960. "( \"```\\n\" | \"```json\\n\" | \"```xml\\n\" ) space " + wrappable_tool_call + " space \"```\" space ");
  1961. auto tool_call = builder.add_rule("tool_call", string_join(tool_call_alts, " | "));
  1962. builder.add_rule("root",
  1963. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1964. (inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call));
  1965. // Trigger on some common known "good bad" outputs (only from the start and with a json that's about a specific argument name to avoid false positives)
  1966. data.grammar_triggers.push_back({
  1967. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1968. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1969. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1970. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") + (
  1971. "\\s*("
  1972. "(?:<tool_call>"
  1973. "|<function"
  1974. "|(?:```(?:json|xml)?\n\\s*)?(?:<function_call>|<tools>|<xml><json>|<response>)?"
  1975. "\\s*\\{\\s*\"name\"\\s*:\\s*\"(?:" + string_join(escaped_names, "|") + ")\""
  1976. ")"
  1977. ")[\\s\\S]*"
  1978. ),
  1979. });
  1980. data.preserved_tokens = {
  1981. "<think>",
  1982. "</think>",
  1983. "<tool_call>",
  1984. "</tool_call>",
  1985. "<function",
  1986. "<tools>",
  1987. "</tools>",
  1988. "<response>",
  1989. "</response>",
  1990. "<function_call>",
  1991. "</function_call>",
  1992. "<json>",
  1993. "</json>",
  1994. "<JSON>",
  1995. "</JSON>",
  1996. "```",
  1997. "```json",
  1998. "```xml",
  1999. };
  2000. });
  2001. }
  2002. return data;
  2003. }
  2004. static common_chat_params common_chat_params_init_granite(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2005. common_chat_params data;
  2006. // Pass thinking context for Granite template
  2007. json additional_context = {
  2008. {"thinking", inputs.enable_thinking},
  2009. };
  2010. data.prompt = apply(tmpl, inputs, /* messages_override= */ std::nullopt, /* tools_override= */ std::nullopt, additional_context);
  2011. data.format = COMMON_CHAT_FORMAT_GRANITE;
  2012. if (string_ends_with(data.prompt, "<think>\n") || string_ends_with(data.prompt, "<think>")) {
  2013. if (!inputs.enable_thinking) {
  2014. data.prompt += "</think>";
  2015. } else {
  2016. data.thinking_forced_open = true;
  2017. }
  2018. }
  2019. if (!inputs.tools.is_null()) {
  2020. // Granite uses <|tool_call|> followed by JSON list
  2021. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2022. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2023. std::vector<std::string> tool_rules;
  2024. foreach_function(inputs.tools, [&](const json & tool) {
  2025. const auto & function = tool.at("function");
  2026. std::string name = function.at("name");
  2027. auto parameters = function.at("parameters");
  2028. builder.resolve_refs(parameters);
  2029. tool_rules.push_back(builder.add_rule(name + "-call", builder.add_schema(name +
  2030. "-args", {
  2031. {"type", "object"},
  2032. {"properties", {
  2033. {"name", {{"const", name}}},
  2034. {"arguments", parameters},
  2035. }},
  2036. {"required", json::array({"name", "arguments"})},
  2037. })));
  2038. });
  2039. auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | "));
  2040. auto tool_list = builder.add_rule("tool_list", "\"[\" space " + tool_call + " (\",\" space " + tool_call + ")* space \"]\"");
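// Roughly, the root rules below constrain output like (hypothetical tool call):
//   <|tool_call|>[{"name": "get_weather", "arguments": {"location": "Paris"}}]
// optionally preceded by "</think><response>...</response>" when thinking was forced open.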
  2041. if (data.thinking_forced_open) {
  2042. builder.add_rule("root", "\"</think>\" space \"<response>\" space [^<]* \"</response>\" space \"<|tool_call|>\" space " + tool_list);
  2043. } else {
  2044. builder.add_rule("root", "\"<|tool_call|>\" space " + tool_list);
  2045. }
  2046. data.grammar_triggers.push_back({
  2047. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  2048. "<|tool_call|>"
  2049. });
  2050. data.preserved_tokens = {
  2051. "<think>",
  2052. "</think>",
  2053. "<response>",
  2054. "</response>",
  2055. "<|tool_call|>",
  2056. };
  2057. });
  2058. } else {
  2059. // Handle thinking tags for non-tool responses
  2060. if (data.thinking_forced_open && inputs.enable_thinking) {
  2061. data.grammar_lazy = false;
  2062. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2063. builder.add_rule("root", "\"</think>\" space \"<response>\" space .* \"</response>\" space");
  2064. });
  2065. data.preserved_tokens = {
  2066. "<think>",
  2067. "</think>",
  2068. "<response>",
  2069. "</response>",
  2070. };
  2071. }
  2072. }
  2073. return data;
  2074. }
  2075. static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2076. common_chat_params data;
  2077. data.prompt = apply(tmpl, inputs);
  2078. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  2079. data.grammar_lazy = false;
  2080. if (!inputs.json_schema.is_null()) {
  2081. if (!inputs.grammar.empty()) {
  2082. throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
  2083. }
  2084. data.grammar = json_schema_to_grammar(inputs.json_schema);
  2085. } else {
  2086. data.grammar = inputs.grammar;
  2087. }
  2088. return data;
  2089. }
  2090. static common_chat_params common_chat_params_init_seed_oss(
  2091. const common_chat_template & tmpl,
  2092. templates_params & params,
  2093. const common_chat_templates_inputs & inputs)
  2094. {
  2095. common_chat_params data;
  2096. data.prompt = apply(tmpl, params);
  2097. data.format = COMMON_CHAT_FORMAT_SEED_OSS;
  2098. if (string_ends_with(data.prompt, "<seed:think>")) {
  2099. if (!inputs.enable_thinking) {
  2100. data.prompt += "</seed:think>";
  2101. } else {
  2102. data.thinking_forced_open = true;
  2103. }
  2104. }
  2105. if (params.tools.is_array() && !params.tools.empty()) {
  2106. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2107. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2108. std::vector<std::string> tool_rules;
  2109. foreach_function(params.tools, [&](const json & tool) {
  2110. const auto & function = tool.at("function");
  2111. std::string name = function.at("name");
  2112. auto parameters = function.at("parameters");
  2113. builder.resolve_refs(parameters);
  2114. // Create rule for Seed-OSS function call format
  2115. std::string param_rules;
  2116. if (parameters.contains("properties")) {
  2117. for (const auto & [key, value] : parameters.at("properties").items()) {
  2118. param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
  2119. "\"</parameter>\"";
  2120. }
  2121. }
  2122. tool_rules.push_back(builder.add_rule(name + "-call",
  2123. "\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
  2124. param_rules +
  2125. " \"</function>\" space \"</seed:tool_call>\""));
  2126. });
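// Roughly, each rule above constrains output like (hypothetical tool call; "space" allows whitespace between markers):
//   <seed:tool_call> <function=get_weather> <parameter=location>Paris</parameter> </function> </seed:tool_call>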
  2127. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
  2128. data.preserved_tokens = {
  2129. "<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
  2130. "<function=", "</function>", "<parameter=", "</parameter>",
  2131. };
  2132. builder.add_rule("root", string_join(tool_rules, " | "));
  2133. });
  2134. }
  2135. return data;
  2136. }
  2137. static common_chat_params common_chat_templates_apply_jinja(
  2138. const struct common_chat_templates * tmpls,
  2139. const struct common_chat_templates_inputs & inputs)
  2140. {
  2141. templates_params params;
  2142. params.tools = common_chat_tools_to_json_oaicompat<json>(inputs.tools);
  2143. const auto & tmpl = params.tools.is_array() && tmpls->template_tool_use
  2144. ? *tmpls->template_tool_use
  2145. : *tmpls->template_default;
  2146. const auto & src = tmpl.source();
  2147. const auto & caps = tmpl.original_caps();
  2148. params.messages = common_chat_msgs_to_json_oaicompat<json>(inputs.messages, /* concat_text= */ !tmpl.original_caps().requires_typed_content);
  2149. params.add_generation_prompt = inputs.add_generation_prompt;
  2150. params.tool_choice = inputs.tool_choice;
  2151. params.enable_thinking = inputs.enable_thinking;
  2152. params.grammar = inputs.grammar;
  2153. params.now = inputs.now;
  2154. params.add_bos = tmpls->add_bos;
  2155. params.add_eos = tmpls->add_eos;
  2156. params.extra_context = json::object();
  2157. for (auto el : inputs.chat_template_kwargs) {
  2158. params.extra_context[el.first] = json::parse(el.second);
  2159. }
  2160. if (!inputs.json_schema.empty()) {
  2161. params.json_schema = json::parse(inputs.json_schema);
  2162. }
  2163. if (inputs.parallel_tool_calls && !tmpl.original_caps().supports_parallel_tool_calls) {
  2164. LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n");
  2165. params.parallel_tool_calls = false;
  2166. } else {
  2167. params.parallel_tool_calls = inputs.parallel_tool_calls;
  2168. }
  2169. if (params.tools.is_array()) {
  2170. if (params.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && !params.grammar.empty()) {
  2171. throw std::runtime_error("Cannot specify grammar with tools");
  2172. }
  2173. if (caps.supports_tool_calls && !caps.supports_tools) {
  2174. LOG_WRN("Template supports tool calls but does not natively describe tools. The fallback behaviour used may produce bad results, inspect prompt w/ --verbose & consider overriding the template.\n");
  2175. }
  2176. }
  2177. // DeepSeek V3.1: detect based on specific patterns in the template
  2178. if (src.find("message['prefix'] is defined and message['prefix'] and thinking") != std::string::npos &&
  2179. params.json_schema.is_null()) {
  2180. return common_chat_params_init_deepseek_v3_1(tmpl, params);
  2181. }
  2182. // DeepSeek R1: use handler in all cases except json schema (thinking / tools).
  2183. if (src.find("<|tool▁calls▁begin|>") != std::string::npos && params.json_schema.is_null()) {
  2184. return common_chat_params_init_deepseek_r1(tmpl, params);
  2185. }
2186. // Command R7B: use handler in all cases except json schema (thinking / tools).
  2187. if (src.find("<|END_THINKING|><|START_ACTION|>") != std::string::npos && params.json_schema.is_null()) {
  2188. return common_chat_params_init_command_r7b(tmpl, params);
  2189. }
  2190. // Granite (IBM) - detects thinking / tools support
  2191. if (src.find("elif thinking") != std::string::npos && src.find("<|tool_call|>") != std::string::npos) {
  2192. return common_chat_params_init_granite(tmpl, params);
  2193. }
  2194. // GLM 4.5: detect by <arg_key> and <arg_value> tags (check before Hermes since both use <tool_call>)
  2195. if (src.find("[gMASK]<sop>") != std::string::npos &&
  2196. src.find("<arg_key>") != std::string::npos &&
  2197. src.find("<arg_value>") != std::string::npos &&
  2198. params.json_schema.is_null()) {
  2199. return common_chat_params_init_glm_4_5(tmpl, params);
  2200. }
  2201. // Qwen3-Coder XML format detection (must come before Hermes 2 Pro)
  2202. // Detect via explicit XML markers unique to Qwen3-Coder to avoid false positives in other templates.
2203. // Require presence of the <tool_call>, <function>/<function=...>, and <parameters>/<parameter=...> markers.
  2204. if (src.find("<tool_call>") != std::string::npos &&
  2205. src.find("<function>") != std::string::npos &&
  2206. src.find("<function=") != std::string::npos &&
  2207. src.find("<parameters>") != std::string::npos &&
  2208. src.find("<parameter=") != std::string::npos) {
  2209. return common_chat_params_init_qwen3_coder_xml(tmpl, params);
  2210. }
  2211. // Xiaomi MiMo format detection (must come before Hermes 2 Pro)
  2212. if (src.find("<tools>") != std::string::npos &&
  2213. src.find("# Tools") != std::string::npos &&
  2214. src.find("</tools>") != std::string::npos &&
  2215. src.find("<tool_calls>") != std::string::npos &&
  2216. src.find("</tool_calls>") != std::string::npos &&
  2217. src.find("<tool_response>") != std::string::npos) {
  2218. return common_chat_params_init_xiaomi_mimo(tmpl, params);
  2219. }
  2220. // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
  2221. if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) {
  2222. return common_chat_params_init_hermes_2_pro(tmpl, params);
  2223. }
  2224. // GPT-OSS
  2225. if (src.find("<|channel|>") != std::string::npos) {
  2226. return common_chat_params_init_gpt_oss(tmpl, params);
  2227. }
  2228. // Seed-OSS
  2229. if (src.find("<seed:think>") != std::string::npos) {
  2230. return common_chat_params_init_seed_oss(tmpl, params, inputs);
  2231. }
  2232. // Nemotron v2
  2233. if (src.find("<SPECIAL_10>") != std::string::npos) {
  2234. return common_chat_params_init_nemotron_v2(tmpl, params);
  2235. }
  2236. // Apertus format detection
  2237. if (src.find("<|system_start|>") != std::string::npos && src.find("<|tools_prefix|>") != std::string::npos) {
  2238. return common_chat_params_init_apertus(tmpl, params);
  2239. }
  2240. // LFM2 (w/ tools)
  2241. if (src.find("List of tools: <|tool_list_start|>[") != std::string::npos &&
  2242. src.find("]<|tool_list_end|>") != std::string::npos) {
  2243. return common_chat_params_init_lfm2(tmpl, params);
  2244. }
  2245. // MiniMax-M2 format detection
  2246. if (src.find("]~!b[") != std::string::npos && src.find("]~b]") != std::string::npos) {
  2247. return common_chat_params_init_minimax_m2(tmpl, params);
  2248. }
  2249. // Kimi K2 format detection
  2250. if (src.find("<|im_system|>tool_declare<|im_middle|>") != std::string::npos &&
  2251. src.find("<|tool_calls_section_begin|>") != std::string::npos &&
  2252. src.find("## Return of") != std::string::npos) {
  2253. return common_chat_params_init_kimi_k2(tmpl, params);
  2254. }
  2255. // Apriel 1.5 format detection
  2256. if (src.find("<thinking>") != std::string::npos &&
  2257. src.find("</thinking>") != std::string::npos &&
  2258. src.find("<available_tools>") != std::string::npos &&
  2259. src.find("<|assistant|>") != std::string::npos &&
  2260. src.find("<|tool_result|>") != std::string::npos &&
  2261. src.find("<tool_calls>[") != std::string::npos &&
  2262. src.find("]</tool_calls>") != std::string::npos) {
  2263. return common_chat_params_init_apriel_1_5(tmpl, params);
  2264. }
  2265. // Use generic handler when mixing tools + JSON schema.
  2266. // TODO: support that mix in handlers below.
  2267. if ((params.tools.is_array() && params.json_schema.is_object())) {
  2268. return common_chat_params_init_generic(tmpl, params);
  2269. }
  2270. // Functionary prepends "all\n" to plain content outputs, so we use its handler in all cases.
  2271. if (src.find(">>>all") != std::string::npos) {
  2272. return common_chat_params_init_functionary_v3_2(tmpl, params);
  2273. }
  2274. // Firefunction v2 requires datetime and functions in the context even w/o tools, so we also use its handler in all cases.
  2275. if (src.find(" functools[") != std::string::npos) {
  2276. return common_chat_params_init_firefunction_v2(tmpl, params);
  2277. }
  2278. // Functionary v3.1 (w/ tools)
  2279. if (src.find("<|start_header_id|>") != std::string::npos
  2280. && src.find("<function=") != std::string::npos) {
  2281. return common_chat_params_init_functionary_v3_1_llama_3_1(tmpl, params);
  2282. }
  2283. // Llama 3.1, 3.2, 3.3 (also requires date_string so using it even w/o tools)
  2284. if (src.find("<|start_header_id|>ipython<|end_header_id|>") != std::string::npos) {
  2285. auto allow_python_tag_builtin_tools = src.find("<|python_tag|>") != std::string::npos;
  2286. return common_chat_params_init_llama_3_x(tmpl, params, allow_python_tag_builtin_tools);
  2287. }
  2288. if (src.find("[THINK]") != std::string::npos && src.find("[/THINK]") != std::string::npos) {
  2289. return common_chat_params_init_magistral(tmpl, params);
  2290. }
  2291. // Plain handler (no tools)
  2292. if (params.tools.is_null() || inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_NONE) {
  2293. return common_chat_params_init_without_tools(tmpl, params);
  2294. }
  2295. // Mistral Nemo (w/ tools)
  2296. if (src.find("[TOOL_CALLS]") != std::string::npos) {
  2297. return common_chat_params_init_mistral_nemo(tmpl, params);
  2298. }
  2299. // Generic fallback
  2300. return common_chat_params_init_generic(tmpl, params);
  2301. }
  2302. // Legacy template route (adhoc C++ implementation of known templates), forward to llama_chat_apply_template.
  2303. static common_chat_params common_chat_templates_apply_legacy(
  2304. const struct common_chat_templates * tmpls,
  2305. const struct common_chat_templates_inputs & inputs)
  2306. {
  2307. size_t alloc_size = 0;
  2308. std::vector<llama_chat_message> chat;
  2309. std::vector<std::string> contents;
  2310. for (const auto & msg : inputs.messages) {
  2311. auto content = msg.content;
  2312. for (const auto & part : msg.content_parts) {
  2313. if (part.type != "text") {
  2314. LOG_WRN("Ignoring non-text content part: %s\n", part.type.c_str());
  2315. continue;
  2316. }
  2317. if (!content.empty()) {
  2318. content += "\n";;
  2319. }
  2320. content += part.text;
  2321. }
  2322. contents.emplace_back(std::move(content));
  2323. }
  2324. for (size_t i = 0; i < contents.size(); ++i) {
  2325. const auto & msg = inputs.messages[i];
  2326. const auto & content = contents[i];
  2327. chat.push_back({msg.role.c_str(), content.c_str()});
  2328. size_t msg_size = msg.role.size() + content.size();
  2329. alloc_size += msg_size + (msg_size / 4); // == msg_size * 1.25 but avoiding float ops
  2330. }
  2331. std::vector<char> buf(alloc_size);
  2332. // run the first time to get the total output length
  2333. const auto & src = tmpls->template_default->source();
  2334. int32_t res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());
  2335. // error: chat template is not supported
  2336. if (res < 0) {
  2337. // if the custom "tmpl" is not supported, we throw an error
2338. // this check is somewhat redundant (better safe than sorry), since we're not sure whether the user validated the custom template with llama_chat_verify_template()
  2339. throw std::runtime_error("this custom template is not supported, try using --jinja");
  2340. }
  2341. // if it turns out that our buffer is too small, we resize it
  2342. if ((size_t) res > buf.size()) {
  2343. buf.resize(res);
  2344. res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());
  2345. }
  2346. // for safety, we check the result again
  2347. if (res < 0 || (size_t) res > buf.size()) {
  2348. throw std::runtime_error("failed to apply chat template, try using --jinja");
  2349. }
  2350. common_chat_params params;
  2351. params.prompt = std::string(buf.data(), res);
  2352. if (!inputs.json_schema.empty()) {
  2353. params.grammar = json_schema_to_grammar(json::parse(inputs.json_schema));
  2354. } else {
  2355. params.grammar = inputs.grammar;
  2356. }
  2357. return params;
  2358. }
  2359. common_chat_params common_chat_templates_apply(
  2360. const struct common_chat_templates * tmpls,
  2361. const struct common_chat_templates_inputs & inputs)
  2362. {
  2363. GGML_ASSERT(tmpls != nullptr);
  2364. return inputs.use_jinja
  2365. ? common_chat_templates_apply_jinja(tmpls, inputs)
  2366. : common_chat_templates_apply_legacy(tmpls, inputs);
  2367. }