// chat.cpp
#include "chat.h"
#include "chat-parser.h"
#include "chat-peg-parser.h"
#include "common.h"
#include "json-partial.h"
#include "json-schema-to-grammar.h"
#include "log.h"
#include "regex-partial.h"

// #include <minja/chat-template.hpp>
// #include <minja/minja.hpp>
#include "jinja/parser.h"
#include "jinja/value.h"
#include "jinja/runtime.h"
#include "jinja/caps.h"

#include <algorithm>
#include <cstdio>
#include <cctype>
#include <exception>
#include <functional>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

using json = nlohmann::ordered_json;

static std::string format_time(const std::chrono::system_clock::time_point & now, const std::string & format) {
    auto time = std::chrono::system_clock::to_time_t(now);
    auto local_time = *std::localtime(&time);
    std::ostringstream ss;
    ss << std::put_time(&local_time, format.c_str());
    auto res = ss.str();
    return res;
}
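// Usage sketch (hypothetical values): format_time(now, "%d %b %Y") might yield
// "05 Feb 2025"; the exact output depends on the current date and the C locale.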
static std::string string_diff(const std::string & last, const std::string & current) {
    if (last.empty()) {
        return current;
    }
    if (!string_starts_with(current, last)) {
        if (string_starts_with(last, current)) {
            // This happens if the last generation ended on a partial stop word (not erased),
            // and the current one ended on a full stop word (erased).
            return "";
        }
        throw std::runtime_error("Invalid diff: '" + last + "' not found at start of '" + current + "'");
    }
    return current.substr(last.size());
}
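// Sketch of the contract (hypothetical values):
//   string_diff("Hello",   "Hello, world") == ", world"  // strict prefix -> new suffix
//   string_diff("",        "Hi")           == "Hi"       // empty last    -> whole string
//   string_diff("Hi ther", "Hi")           == ""         // partial-stop-word case above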
static bool has_content_or_tool_calls(const common_chat_msg & msg) {
    return !msg.content.empty() || !msg.tool_calls.empty();
}

template <>
json common_chat_msg::to_json_oaicompat() const
{
    json message {
        {"role", "assistant"},
    };
    if (!reasoning_content.empty()) {
        message["reasoning_content"] = reasoning_content;
    }
    if (content.empty() && !tool_calls.empty()) {
        message["content"] = json();
    } else {
        message["content"] = content;
    }
    if (!tool_calls.empty()) {
        auto arr = json::array();
        for (const auto & tc : tool_calls) {
            arr.push_back({
                {"type", "function"},
                {"function", {
                    {"name", tc.name},
                    {"arguments", tc.arguments},
                }},
                {"id", tc.id},
                // // Some templates generate and require an id (sometimes in a very specific format, e.g. Mistral Nemo).
                // // We only generate a random id for the ones that don't generate one by themselves
                // // (they also won't get to see it as their template likely doesn't use it, so it's all for the client)
                // {"id", tc.id.empty() ? gen_tool_call_id() : tc.id},
            });
        }
        message["tool_calls"] = arr;
    }
    return message;
}
std::vector<common_chat_msg_diff> common_chat_msg_diff::compute_diffs(const common_chat_msg & msg_prv, const common_chat_msg & msg_new) {
    std::vector<common_chat_msg_diff> diffs;
    if (msg_new.tool_calls.size() > msg_prv.tool_calls.size()) {
        diffs.reserve(msg_new.tool_calls.size() - msg_prv.tool_calls.size() + 3);
    } else {
        diffs.reserve(3);
    }
    // TODO: these can become expensive for long messages - how to optimize?
    if (msg_prv.reasoning_content != msg_new.reasoning_content) {
        auto & diff = diffs.emplace_back();
        diff.reasoning_content_delta = string_diff(msg_prv.reasoning_content, msg_new.reasoning_content);
    }
    if (msg_prv.content != msg_new.content) {
        auto & diff = diffs.emplace_back();
        diff.content_delta = string_diff(msg_prv.content, msg_new.content);
    }
    if (msg_new.tool_calls.size() < msg_prv.tool_calls.size()) {
        throw std::runtime_error("Invalid diff: now finding fewer tool calls!");
    }
    if (!msg_prv.tool_calls.empty()) {
        const auto idx = msg_prv.tool_calls.size() - 1;
        const auto & pref = msg_prv.tool_calls[idx];
        const auto & newf = msg_new.tool_calls[idx];
        if (pref.name != newf.name) {
            throw std::runtime_error("Invalid diff: tool call mismatch!");
        }
        const auto args_diff = string_diff(pref.arguments, newf.arguments);
        if (!args_diff.empty() || pref.id != newf.id) {
            auto & diff = diffs.emplace_back();
            diff.tool_call_index = idx;
            if (pref.id != newf.id) {
                diff.tool_call_delta.id = newf.id;
                diff.tool_call_delta.name = newf.name;
            }
            diff.tool_call_delta.arguments = args_diff;
        }
    }
    for (size_t idx = msg_prv.tool_calls.size(); idx < msg_new.tool_calls.size(); ++idx) {
        auto & diff = diffs.emplace_back();
        diff.tool_call_index = idx;
        diff.tool_call_delta = msg_new.tool_calls[idx];
    }
    return diffs;
}
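// Note: compute_diffs() is built for streaming: callers keep the previously parsed
// message and diff it against the newly parsed one, emitting each common_chat_msg_diff
// as an incremental update (see common_chat_msg_diff_to_json_oaicompat() below for the
// OAI-compatible wire shape). Descriptive note only; the call sites live outside this file.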
using chat_template_caps = jinja::caps;

struct common_chat_template {
    jinja::program prog;
    std::string bos_tok;
    std::string eos_tok;
    std::string src;
    chat_template_caps caps;

    common_chat_template(const std::string & src, const std::string & bos_token, const std::string & eos_token) {
        jinja::lexer lexer;
        auto lexer_res = lexer.tokenize(src);
        this->prog = jinja::parse_from_tokens(lexer_res);
        this->src = lexer_res.source;
        this->bos_tok = bos_token;
        this->eos_tok = eos_token;
        this->caps = jinja::caps_get(prog);
        // LOG_INF("%s: caps:\n%s\n", __func__, this->caps.to_string().c_str());
    }

    const std::string & source() const { return src; }
    const std::string & bos_token() const { return bos_tok; }
    const std::string & eos_token() const { return eos_tok; }

    // TODO: this is ugly, refactor it somehow
    json add_system(const json & messages, const std::string & system_prompt) const {
        GGML_ASSERT(messages.is_array());
        auto msgs_copy = messages;
        if (!caps.supports_system_role) {
            if (msgs_copy.empty()) {
                msgs_copy.insert(msgs_copy.begin(), json{
                    {"role", "user"},
                    {"content", system_prompt}
                });
            } else {
                auto & first_msg = msgs_copy[0];
                if (!first_msg.contains("content")) {
                    first_msg["content"] = "";
                }
                first_msg["content"] = system_prompt + "\n\n"
                    + first_msg["content"].get<std::string>();
            }
        } else {
            if (msgs_copy.empty() || msgs_copy[0].at("role") != "system") {
                msgs_copy.insert(msgs_copy.begin(), json{
                    {"role", "system"},
                    {"content", system_prompt}
                });
            } else if (msgs_copy[0].at("role") == "system") {
                msgs_copy[0]["content"] = system_prompt;
            }
        }
        return msgs_copy;
    }

    chat_template_caps original_caps() const {
        return caps;
    }
};
struct common_chat_templates {
    bool add_bos;
    bool add_eos;
    bool has_explicit_template; // Model had a builtin template, or a template override was specified.
    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
    std::unique_ptr<common_chat_template> template_tool_use;
};

struct templates_params {
    json messages;
    json tools;
    common_chat_tool_choice tool_choice;
    json json_schema;
    bool parallel_tool_calls;
    common_reasoning_format reasoning_format;
    bool stream;
    std::string grammar;
    bool add_generation_prompt = true;
    bool enable_thinking = true;
    std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
    json extra_context;
    bool add_bos;
    bool add_eos;
    bool is_inference = true;
    bool mark_input = true; // whether to mark input strings in the jinja context
};
common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice) {
    if (tool_choice == "auto") {
        return COMMON_CHAT_TOOL_CHOICE_AUTO;
    }
    if (tool_choice == "none") {
        return COMMON_CHAT_TOOL_CHOICE_NONE;
    }
    if (tool_choice == "required") {
        return COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    }
    throw std::invalid_argument("Invalid tool_choice: " + tool_choice);
}

bool common_chat_templates_support_enable_thinking(const common_chat_templates * chat_templates) {
    common_chat_templates_inputs dummy_inputs;
    common_chat_msg msg;
    msg.role = "user";
    msg.content = "test";
    dummy_inputs.messages = {msg};
    dummy_inputs.enable_thinking = false;
    const auto rendered_no_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
    dummy_inputs.enable_thinking = true;
    const auto rendered_with_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
    return rendered_no_thinking.prompt != rendered_with_thinking.prompt;
}
template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const json & messages) {
    std::vector<common_chat_msg> msgs;
    try {
        if (!messages.is_array()) {
            throw std::invalid_argument("Expected 'messages' to be an array, got " + messages.dump());
        }
        for (const auto & message : messages) {
            if (!message.is_object()) {
                throw std::invalid_argument("Expected 'message' to be an object, got " + message.dump());
            }
            common_chat_msg msg;
            if (!message.contains("role")) {
                throw std::invalid_argument("Missing 'role' in message: " + message.dump());
            }
            msg.role = message.at("role");
            auto has_content = message.contains("content");
            auto has_tool_calls = message.contains("tool_calls");
            if (has_content) {
                const auto & content = message.at("content");
                if (content.is_string()) {
                    msg.content = content;
                } else if (content.is_array()) {
                    for (const auto & part : content) {
                        if (!part.contains("type")) {
                            throw std::invalid_argument("Missing content part type: " + part.dump());
                        }
                        const auto & type = part.at("type");
                        if (type != "text") {
                            throw std::invalid_argument("Unsupported content part type: " + type.dump());
                        }
                        common_chat_msg_content_part msg_part;
                        msg_part.type = type;
                        msg_part.text = part.at("text");
                        msg.content_parts.push_back(msg_part);
                    }
                } else if (!content.is_null()) {
                    throw std::invalid_argument("Invalid 'content' type: expected string or array, got " + content.dump() + " (ref: https://github.com/ggml-org/llama.cpp/issues/8367)");
                }
            }
            if (has_tool_calls) {
                for (const auto & tool_call : message.at("tool_calls")) {
                    common_chat_tool_call tc;
                    if (!tool_call.contains("type")) {
                        throw std::invalid_argument("Missing tool call type: " + tool_call.dump());
                    }
                    const auto & type = tool_call.at("type");
                    if (type != "function") {
                        throw std::invalid_argument("Unsupported tool call type: " + tool_call.dump());
                    }
                    if (!tool_call.contains("function")) {
                        throw std::invalid_argument("Missing tool call function: " + tool_call.dump());
                    }
                    const auto & fc = tool_call.at("function");
                    if (!fc.contains("name")) {
                        throw std::invalid_argument("Missing tool call name: " + tool_call.dump());
                    }
                    tc.name = fc.at("name");
                    tc.arguments = fc.at("arguments");
                    if (tool_call.contains("id")) {
                        tc.id = tool_call.at("id");
                    }
                    msg.tool_calls.push_back(tc);
                }
            }
            if (!has_content && !has_tool_calls) {
                throw std::invalid_argument("Expected 'content' or 'tool_calls' (ref: https://github.com/ggml-org/llama.cpp/issues/8367 & https://github.com/ggml-org/llama.cpp/issues/12279)");
            }
            if (message.contains("reasoning_content")) {
                msg.reasoning_content = message.at("reasoning_content");
            }
            if (message.contains("name")) {
                msg.tool_name = message.at("name");
            }
            if (message.contains("tool_call_id")) {
                msg.tool_call_id = message.at("tool_call_id");
            }
            msgs.push_back(msg);
        }
    } catch (const std::exception & e) {
        // @ngxson : disable otherwise it's bloating the API response
        // printf("%s\n", std::string("; messages = ") + messages.dump(2));
        throw std::runtime_error("Failed to parse messages: " + std::string(e.what()));
    }
    return msgs;
}
template <>
json common_chat_msgs_to_json_oaicompat(const std::vector<common_chat_msg> & msgs, bool concat_typed_text) {
    json messages = json::array();
    for (const auto & msg : msgs) {
        if (!msg.content.empty() && !msg.content_parts.empty()) {
            throw std::runtime_error("Cannot specify both content and content_parts");
        }
        json jmsg {
            {"role", msg.role},
        };
        if (!msg.content.empty()) {
            jmsg["content"] = msg.content;
        } else if (!msg.content_parts.empty()) {
            if (concat_typed_text) {
                std::string text;
                for (const auto & part : msg.content_parts) {
                    if (part.type != "text") {
                        LOG_WRN("Ignoring content part type: %s\n", part.type.c_str());
                        continue;
                    }
                    if (!text.empty()) {
                        text += '\n';
                    }
                    text += part.text;
                }
                jmsg["content"] = text;
            } else {
                auto & parts = jmsg["content"] = json::array();
                for (const auto & part : msg.content_parts) {
                    parts.push_back({
                        {"type", part.type},
                        {"text", part.text},
                    });
                }
            }
        } else {
            jmsg["content"] = "";
        }
        if (!msg.reasoning_content.empty()) {
            jmsg["reasoning_content"] = msg.reasoning_content;
        }
        if (!msg.tool_name.empty()) {
            jmsg["name"] = msg.tool_name;
        }
        if (!msg.tool_call_id.empty()) {
            jmsg["tool_call_id"] = msg.tool_call_id;
        }
        if (!msg.tool_calls.empty()) {
            auto & tool_calls = jmsg["tool_calls"] = json::array();
            for (const auto & tool_call : msg.tool_calls) {
                json tc {
                    {"type", "function"},
                    {"function", {
                        {"name", tool_call.name},
                        {"arguments", tool_call.arguments},
                    }},
                };
                if (!tool_call.id.empty()) {
                    tc["id"] = tool_call.id;
                }
                tool_calls.push_back(tc);
            }
        }
        messages.push_back(jmsg);
    }
    return messages;
}
template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const std::string & messages) {
    return common_chat_msgs_parse_oaicompat(json::parse(messages));
}

template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const json & tools) {
    std::vector<common_chat_tool> result;
    try {
        if (!tools.is_null()) {
            if (!tools.is_array()) {
                throw std::invalid_argument("Expected 'tools' to be an array, got " + tools.dump());
            }
            for (const auto & tool : tools) {
                if (!tool.contains("type")) {
                    throw std::invalid_argument("Missing tool type: " + tool.dump());
                }
                const auto & type = tool.at("type");
                if (!type.is_string() || type != "function") {
                    throw std::invalid_argument("Unsupported tool type: " + tool.dump());
                }
                if (!tool.contains("function")) {
                    throw std::invalid_argument("Missing tool function: " + tool.dump());
                }
                const auto & function = tool.at("function");
                result.push_back({
                    /* .name = */        function.at("name"),
                    /* .description = */ function.value("description", ""),
                    /* .parameters = */  function.value("parameters", json::object()).dump(),
                });
            }
        }
    } catch (const std::exception & e) {
        throw std::runtime_error("Failed to parse tools: " + std::string(e.what()) + "; tools = " + tools.dump(2));
    }
    return result;
}

template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const std::string & tools) {
    return common_chat_tools_parse_oaicompat(json::parse(tools));
}

template <>
json common_chat_tools_to_json_oaicompat(const std::vector<common_chat_tool> & tools) {
    if (tools.empty()) {
        return json();
    }
    auto result = json::array();
    for (const auto & tool : tools) {
        result.push_back({
            {"type", "function"},
            {"function", {
                {"name", tool.name},
                {"description", tool.description},
                {"parameters", json::parse(tool.parameters)},
            }},
        });
    }
    return result;
}
template <> json common_chat_msg_diff_to_json_oaicompat(const common_chat_msg_diff & diff) {
    json delta = json::object();
    if (!diff.reasoning_content_delta.empty()) {
        delta["reasoning_content"] = diff.reasoning_content_delta;
    }
    if (!diff.content_delta.empty()) {
        delta["content"] = diff.content_delta;
    }
    if (diff.tool_call_index != std::string::npos) {
        json tool_call;
        tool_call["index"] = diff.tool_call_index;
        if (!diff.tool_call_delta.id.empty()) {
            tool_call["id"] = diff.tool_call_delta.id;
            tool_call["type"] = "function";
        }
        json function = json::object();
        if (!diff.tool_call_delta.name.empty()) {
            function["name"] = diff.tool_call_delta.name;
        }
        function["arguments"] = diff.tool_call_delta.arguments;
        tool_call["function"] = function;
        delta["tool_calls"] = json::array({tool_call});
    }
    return delta;
}
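// Example of a resulting delta (hypothetical values), matching the OAI streaming shape:
//   {"tool_calls": [{"index": 0, "id": "call_1", "type": "function",
//                    "function": {"name": "get_weather", "arguments": "{\"city\":"}}]}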
bool common_chat_verify_template(const std::string & tmpl, bool use_jinja) {
    if (use_jinja) {
        try {
            common_chat_msg msg;
            msg.role = "user";
            msg.content = "test";
            auto tmpls = common_chat_templates_init(/* model= */ nullptr, tmpl);
            common_chat_templates_inputs inputs;
            inputs.messages = {msg};
            common_chat_templates_apply(tmpls.get(), inputs);
            return true;
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to apply template: %s\n", __func__, e.what());
            return false;
        }
    }
    llama_chat_message chat[] = {{"user", "test"}};
    const int res = llama_chat_apply_template(tmpl.c_str(), chat, 1, true, nullptr, 0);
    return res >= 0;
}

std::string common_chat_format_single(
        const struct common_chat_templates * tmpls,
        const std::vector<common_chat_msg> & past_msg,
        const common_chat_msg & new_msg,
        bool add_ass,
        bool use_jinja) {
    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;
    inputs.add_bos = tmpls->add_bos;
    inputs.add_eos = tmpls->add_eos;
    std::string fmt_past_msg;
    if (!past_msg.empty()) {
        inputs.messages = past_msg;
        inputs.add_generation_prompt = false;
        fmt_past_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    }
    std::ostringstream ss;
    // if the past_msg ends with a newline, we must preserve it in the formatted version
    if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
        ss << "\n";
    }
    // format chat with new_msg
    inputs.messages.push_back(new_msg);
    inputs.add_generation_prompt = add_ass;
    auto fmt_new_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    // get the diff part
    ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return ss.str();
}
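// Sketch (hypothetical ChatML strings): if the past messages render to
//   "<|im_start|>user\nHi<|im_end|>\n"
// and the full chat including new_msg renders to that prefix plus
//   "<|im_start|>user\nBye<|im_end|>\n<|im_start|>assistant\n"
// then only that new suffix is returned (with a leading "\n" re-added when the past
// render ended on a newline and add_ass is set, per the check above).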
std::string common_chat_format_example(const struct common_chat_templates * tmpls, bool use_jinja, const std::map<std::string, std::string> & chat_template_kwargs) {
    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;
    inputs.add_bos = tmpls->add_bos;
    inputs.add_eos = tmpls->add_eos;
    inputs.chat_template_kwargs = chat_template_kwargs;
    auto add_simple_msg = [&](auto role, auto content) {
        common_chat_msg msg;
        msg.role = role;
        msg.content = content;
        inputs.messages.push_back(msg);
    };
    add_simple_msg("system", "You are a helpful assistant");
    add_simple_msg("user", "Hello");
    add_simple_msg("assistant", "Hi there");
    add_simple_msg("user", "How are you?");
    return common_chat_templates_apply(tmpls, inputs).prompt;
}

#define CHATML_TEMPLATE_SRC \
    "{%- for message in messages -%}\n" \
    "  {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' -}}\n" \
    "{%- endfor -%}\n" \
    "{%- if add_generation_prompt -%}\n" \
    "  {{- '<|im_start|>assistant\n' -}}\n" \
    "{%- endif -%}"
void common_chat_templates_free(struct common_chat_templates * tmpls) {
    delete tmpls;
}

bool common_chat_templates_was_explicit(const struct common_chat_templates * tmpls) {
    return tmpls->has_explicit_template;
}

std::string common_chat_templates_source(const struct common_chat_templates * tmpls, const std::string & variant) {
    if (!variant.empty()) {
        if (variant == "tool_use") {
            if (tmpls->template_tool_use) {
                return tmpls->template_tool_use->source();
            }
            return "";
        } else {
            LOG_DBG("%s: unknown template variant: %s\n", __func__, variant.c_str());
        }
    }
    return tmpls->template_default->source();
}
common_chat_templates_ptr common_chat_templates_init(
        const struct llama_model * model,
        const std::string & chat_template_override,
        const std::string & bos_token_override,
        const std::string & eos_token_override)
{
    std::string default_template_src;
    std::string template_tool_use_src;
    bool has_explicit_template = !chat_template_override.empty();
    if (chat_template_override.empty()) {
        GGML_ASSERT(model != nullptr);
        const auto * str = llama_model_chat_template(model, /* name */ nullptr);
        if (str) {
            default_template_src = str;
            has_explicit_template = true;
        }
        str = llama_model_chat_template(model, /* name */ "tool_use");
        if (str) {
            template_tool_use_src = str;
            has_explicit_template = true;
        }
    } else {
        default_template_src = chat_template_override;
    }
    if (default_template_src.empty() || default_template_src == "chatml") {
        if (!template_tool_use_src.empty()) {
            default_template_src = template_tool_use_src;
        } else {
            default_template_src = CHATML_TEMPLATE_SRC;
        }
    }
    // TODO @ngxson : this is a temporary hack to prevent chat template from throwing an error
    // Ref: https://github.com/ggml-org/llama.cpp/pull/15230#issuecomment-3173959633
    if (default_template_src.find("<|channel|>") != std::string::npos
        // search for the error message and patch it
        && default_template_src.find("in message.content or") != std::string::npos) {
        string_replace_all(default_template_src,
            "{%- if \"<|channel|>analysis<|message|>\" in message.content or \"<|channel|>final<|message|>\" in message.content %}",
            "{%- if false %}");
    }
    // TODO @aldehir : this is a temporary fix, pending Minja changes
    // Ref: https://github.com/ggml-org/llama.cpp/pull/17713#issuecomment-3631342664
    if (default_template_src.find("[TOOL_CALLS]") != std::string::npos
        // search for the error message and patch it
        && default_template_src.find("if (message['content'] is none or") != std::string::npos) {
        string_replace_all(default_template_src,
            "{%- if (message['content'] is none or message['content'] == '' or message['content']|length == 0) and (message['tool_calls'] is not defined or message['tool_calls'] is none or message['tool_calls']|length == 0) %}",
            "{%- if false %}");
    }
    std::string token_bos = bos_token_override;
    std::string token_eos = eos_token_override;
    bool add_bos = false;
    bool add_eos = false;
    if (model) {
        const auto * vocab = llama_model_get_vocab(model);
        const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
            if (token == LLAMA_TOKEN_NULL) {
                if (default_template_src.find(jinja_variable_name) != std::string::npos
                    || template_tool_use_src.find(jinja_variable_name) != std::string::npos) {
                    LOG_WRN("common_chat_templates_init: warning: vocab does not have a %s token, jinja template won't work as intended.\n", name);
                }
                return std::string();
            }
            return common_token_to_piece(vocab, token, true);
        };
        token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
        token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
        add_bos = llama_vocab_get_add_bos(vocab);
        add_eos = llama_vocab_get_add_eos(vocab);
    }
    common_chat_templates_ptr tmpls(new common_chat_templates());
    tmpls->has_explicit_template = has_explicit_template;
    tmpls->add_bos = add_bos;
    tmpls->add_eos = add_eos;
    try {
        tmpls->template_default = std::make_unique<common_chat_template>(default_template_src, token_bos, token_eos);
    } catch (const std::exception & e) {
        LOG_ERR("%s: error: %s\n", __func__, e.what());
        LOG_ERR("%s: failed to initialize chat template\n", __func__);
        LOG_ERR("%s: please consider disabling jinja via --no-jinja, or using another chat template\n", __func__);
        throw e;
    }
    if (!template_tool_use_src.empty()) {
        try {
            tmpls->template_tool_use = std::make_unique<common_chat_template>(template_tool_use_src, token_bos, token_eos);
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to parse tool use chat template (ignoring it): %s\n", __func__, e.what());
        }
    }
    return tmpls;
}
const char * common_chat_format_name(common_chat_format format) {
    switch (format) {
        case COMMON_CHAT_FORMAT_CONTENT_ONLY: return "Content-only";
        case COMMON_CHAT_FORMAT_GENERIC: return "Generic";
        case COMMON_CHAT_FORMAT_MISTRAL_NEMO: return "Mistral Nemo";
        case COMMON_CHAT_FORMAT_MAGISTRAL: return "Magistral";
        case COMMON_CHAT_FORMAT_LLAMA_3_X: return "Llama 3.x";
        case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return "Llama 3.x with builtin tools";
        case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "DeepSeek R1";
        case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return "FireFunction v2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1";
        case COMMON_CHAT_FORMAT_DEEPSEEK_V3_1: return "DeepSeek V3.1";
        case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro";
        case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
        case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
        case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
        case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
        case COMMON_CHAT_FORMAT_NEMOTRON_V2: return "Nemotron V2";
        case COMMON_CHAT_FORMAT_APERTUS: return "Apertus";
        case COMMON_CHAT_FORMAT_LFM2_WITH_JSON_TOOLS: return "LFM2 with JSON tools";
        case COMMON_CHAT_FORMAT_MINIMAX_M2: return "MiniMax-M2";
        case COMMON_CHAT_FORMAT_GLM_4_5: return "GLM 4.5";
        case COMMON_CHAT_FORMAT_KIMI_K2: return "Kimi K2";
        case COMMON_CHAT_FORMAT_QWEN3_CODER_XML: return "Qwen3 Coder";
        case COMMON_CHAT_FORMAT_APRIEL_1_5: return "Apriel 1.5";
        case COMMON_CHAT_FORMAT_XIAOMI_MIMO: return "Xiaomi MiMo";
        case COMMON_CHAT_FORMAT_SOLAR_OPEN: return "Solar Open";
        case COMMON_CHAT_FORMAT_EXAONE_MOE: return "EXAONE MoE";
        case COMMON_CHAT_FORMAT_PEG_SIMPLE: return "peg-simple";
        case COMMON_CHAT_FORMAT_PEG_NATIVE: return "peg-native";
        case COMMON_CHAT_FORMAT_PEG_CONSTRUCTED: return "peg-constructed";
        default:
            throw std::runtime_error("Unknown chat format");
    }
}

const char * common_reasoning_format_name(common_reasoning_format format) {
    switch (format) {
        case COMMON_REASONING_FORMAT_NONE: return "none";
        case COMMON_REASONING_FORMAT_AUTO: return "auto";
        case COMMON_REASONING_FORMAT_DEEPSEEK: return "deepseek";
        case COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY: return "deepseek-legacy";
        default:
            throw std::runtime_error("Unknown reasoning format");
    }
}

common_reasoning_format common_reasoning_format_from_name(const std::string & format) {
    if (format == "none") {
        return COMMON_REASONING_FORMAT_NONE;
    } else if (format == "auto") {
        return COMMON_REASONING_FORMAT_AUTO;
    } else if (format == "deepseek") {
        return COMMON_REASONING_FORMAT_DEEPSEEK;
    } else if (format == "deepseek-legacy") {
        return COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY;
    }
    throw std::runtime_error("Unknown reasoning format: " + format);
}
static void foreach_function(const json & tools, const std::function<void(const json &)> & fn) {
    for (const auto & tool : tools) {
        if (!tool.contains("type") || tool.at("type") != "function" || !tool.contains("function")) {
            LOG_INF("Skipping tool without function: %s\n", tool.dump(2).c_str());
            continue;
        }
        fn(tool);
    }
}

static void foreach_parameter(const json & function, const std::function<void(const std::string &, const json &, bool)> & fn) {
    if (!function.contains("parameters") || !function.at("parameters").is_object()) {
        return;
    }
    const auto & params = function.at("parameters");
    if (!params.contains("properties") || !params.at("properties").is_object()) {
        return;
    }
    const auto & props = params.at("properties");
    std::set<std::string> required;
    if (params.contains("required") && params.at("required").is_array()) {
        params.at("required").get_to(required);
    }
    for (const auto & [name, prop] : props.items()) {
        bool is_required = (required.find(name) != required.end());
        fn(name, prop, is_required);
    }
}
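// foreach_parameter walks a "parameters" JSON schema such as this (hypothetical):
//   {"type": "object",
//    "properties": {"city": {"type": "string"}, "days": {"type": "integer"}},
//    "required": ["city"]}
// and invokes fn("city", {...}, true) and fn("days", {...}, false).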
static std::string apply(
        const common_chat_template & tmpl,
        const struct templates_params & inputs,
        const std::optional<json> & messages_override = std::nullopt,
        const std::optional<json> & tools_override = std::nullopt,
        const std::optional<json> & additional_context = std::nullopt)
{
    jinja::context ctx(tmpl.source());
    nlohmann::ordered_json inp = nlohmann::ordered_json{
        {"messages", messages_override.has_value() ? *messages_override : inputs.messages},
        {"tools", tools_override.has_value() ? *tools_override : inputs.tools},
        {"bos_token", tmpl.bos_token()},
        {"eos_token", tmpl.eos_token()},
    };
    if (inputs.extra_context.is_object()) {
        // TODO: do we need to merge, or is replacing fine?
        for (const auto & [k, v] : inputs.extra_context.items()) {
            inp[k] = v;
        }
    }
    if (additional_context.has_value()) {
        // TODO: merge properly instead of overwriting (matching old behavior)
        for (const auto & [k, v] : additional_context->items()) {
            inp[k] = v;
        }
    }
    if (inputs.add_generation_prompt) {
        inp["add_generation_prompt"] = true;
    }
    if (inp["tools"].is_null()) {
        inp["tools"] = json::array();
    }
    jinja::global_from_json(ctx, inp, inputs.mark_input);
    // render
    jinja::runtime runtime(ctx);
    const jinja::value results = runtime.execute(tmpl.prog);
    auto parts = runtime.gather_string_parts(results);
    std::string result = parts->as_string().str();
    // TODO: improve this later
    if (inputs.add_bos && string_starts_with(result, tmpl.bos_token())) {
        result = result.substr(tmpl.bos_token().size());
    }
    if (inputs.add_eos && string_ends_with(result, tmpl.eos_token())) {
        result = result.substr(0, result.size() - tmpl.eos_token().size());
    }
    return result;
}
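// The trailing BOS/EOS strip above avoids doubled special tokens: when the vocab
// already adds BOS at tokenization time (add_bos == true) and the template also
// emits e.g. "<s>" at the start, the leading "<s>" is removed here so it does not
// appear twice in the final token stream.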
static common_chat_params common_chat_params_init_generic(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    auto tool_call_schemas = json::array();
    foreach_function(inputs.tools, [&](const json & tool) {
        const auto & function = tool.at("function");
        auto tool_schema = json {
            {"type", "object"},
            {"properties", {
                {"name", {
                    {"type", "string"},
                    {"const", function.at("name")},
                }},
                {"arguments", function.at("parameters")},
            }},
            {"required", json::array({"name", "arguments"})},
        };
        if (function.contains("description")) {
            tool_schema["description"] = function.at("description");
        }
        if (inputs.parallel_tool_calls) {
            tool_schema.at("properties")["id"] = {
                {"type", "string"},
                {"minLength", 4},
            };
            tool_schema.at("required").push_back("id");
        }
        tool_call_schemas.emplace_back(tool_schema);
    });
    const auto tool_call =
        inputs.parallel_tool_calls
            ? json {
                {"type", "object"},
                {"properties", {
                    {"tool_calls", {
                        {"type", "array"},
                        {"items", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                            {"anyOf", tool_call_schemas},
                        }},
                        {"minItems", 1},
                    }},
                }},
                {"required", json::array({"tool_calls"})},
            }
            : json {
                {"type", "object"},
                {"properties", {
                    {"tool_call", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                        {"anyOf", tool_call_schemas},
                    }},
                }},
                {"required", json::array({"tool_call"})},
            };
    const auto schema =
        inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED
            ? json {
                {"anyOf", json::array({
                    tool_call,
                    {
                        {"type", "object"},
                        {"properties", {
                            {"response", inputs.json_schema.is_null()
                                ? json {{"type", "string"}}
                                : inputs.json_schema
                            },
                        }},
                        {"required", json::array({"response"})},
                    },
                })}
            }
            : tool_call;
    data.grammar_lazy = false;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        builder.add_schema("root", schema);
    });
    auto tweaked_messages = tmpl.add_system(
        inputs.messages,
        "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request");
    // ensure all messages have a "content" field
    for (auto & message : tweaked_messages) {
        if (!message.contains("content") || message["content"].is_null()) {
            message["content"] = "";
        }
    }
    data.prompt = apply(tmpl, inputs, /* messages_override= */ tweaked_messages);
    data.format = COMMON_CHAT_FORMAT_GENERIC;
    return data;
}
static common_chat_params common_chat_params_init_mistral_nemo(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        auto schemas = json::array();
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            schemas.push_back({
                {"type", "object"},
                {"properties", {
                    // Important note: the model is probably trained to take a JSON stringified arguments value.
                    // It's hard to constrain that for now (while reusing the JSON schema conversion), so we're just expecting a plain object.
                    {"name", {
                        {"type", "string"},
                        {"const", function.at("name")},
                    }},
                    {"arguments", function.at("parameters")},
                    {"id", {
                        {"type", "string"},
                        // Nemo's template expects a 9-character alphanumeric ID.
                        {"pattern", "^[a-zA-Z0-9]{9}$"},
                    }},
                }},
                {"required", json::array({"name", "arguments", "id"})},
            });
        });
        auto schema = json {
            {"type", "array"},
            {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
            {"minItems", 1},
        };
        if (!inputs.parallel_tool_calls) {
            schema["maxItems"] = 1;
        }
        builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema));
    });
    data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"});
    data.preserved_tokens = {
        "[TOOL_CALLS]",
    };
    data.prompt = apply(tmpl, inputs);
    data.format = COMMON_CHAT_FORMAT_MISTRAL_NEMO;
    return data;
}
// Case-insensitive find
static size_t ifind_string(const std::string & haystack, const std::string & needle, size_t pos = 0) {
    auto it = std::search(
        haystack.begin() + pos, haystack.end(),
        needle.begin(), needle.end(),
        [](char a, char b) { return std::tolower(a) == std::tolower(b); }
    );
    return (it == haystack.end()) ? std::string::npos : std::distance(haystack.begin(), it);
}
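// e.g. ifind_string("Force JSON Schema.", "force json schema.") == 0
// (matches regardless of case; returns std::string::npos when the needle is absent)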
static common_chat_params common_chat_params_init_lfm2(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    const auto is_json_schema_provided = !inputs.json_schema.is_null();
    const auto is_grammar_provided = !inputs.grammar.empty();
    const auto are_tools_provided = inputs.tools.is_array() && !inputs.tools.empty();

    // the logic requires potentially modifying the messages
    auto tweaked_messages = inputs.messages;

    auto replace_json_schema_marker = [](json & messages) -> bool {
        static std::string marker1 = "force json schema.\n";
        static std::string marker2 = "force json schema.";
        if (messages.empty() || messages.at(0).at("role") != "system") {
            return false;
        }
        std::string content = messages.at(0).at("content");
        for (const auto & marker : {marker1, marker2}) {
            const auto pos = ifind_string(content, marker);
            if (pos != std::string::npos) {
                content.replace(pos, marker.length(), "");
                // inject the modified content back into the messages
                messages.at(0).at("content") = content;
                return true;
            }
        }
        return false;
    };

    // The LFM2 model does not natively work with JSON, but can generally understand the tools structure
    //
    // Example of the pytorch dialog structure:
    // <|startoftext|><|im_start|>system
    // List of tools: <|tool_list_start|>[{"name": "get_candidate_status", "description": "Retrieves the current status of a candidate in the recruitment process", "parameters": {"type": "object", "properties": {"candidate_id": {"type": "string", "description": "Unique identifier for the candidate"}}, "required": ["candidate_id"]}}]<|tool_list_end|><|im_end|>
    // <|im_start|>user
    // What is the current status of candidate ID 12345?<|im_end|>
    // <|im_start|>assistant
    // <|tool_call_start|>[get_candidate_status(candidate_id="12345")]<|tool_call_end|>Checking the current status of candidate ID 12345.<|im_end|>
    // <|im_start|>tool
    // <|tool_response_start|>{"candidate_id": "12345", "status": "Interview Scheduled", "position": "Clinical Research Associate", "date": "2023-11-20"}<|tool_response_end|><|im_end|>
    // <|im_start|>assistant
    // The candidate with ID 12345 is currently in the "Interview Scheduled" stage for the position of Clinical Research Associate, with an interview date set for 2023-11-20.<|im_end|>
    //
    // For compatibility with the llama server's JSON tools semantics,
    // the client can add a "force json schema." line (the marker above) to the system message prompt to force JSON output.
    //
    if (are_tools_provided && (is_json_schema_provided || is_grammar_provided)) {
        // server/utils.hpp prohibits that branch for the custom grammar anyways
        throw std::runtime_error("Tools call must not use \"json_schema\" or \"grammar\", use non-tool invocation if you want to use custom grammar");
    } else if (are_tools_provided && replace_json_schema_marker(tweaked_messages)) {
        LOG_INF("%s: Using tools to build a grammar\n", __func__);
        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            auto schemas = json::array();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                schemas.push_back({
                    {"type", "object"},
                    {"properties", {
                        {"name", {
                            {"type", "string"},
                            {"const", function.at("name")},
                        }},
                        {"arguments", function.at("parameters")},
                    }},
                    {"required", json::array({"name", "arguments", "id"})},
                });
            });
            auto schema = json {
                {"type", "array"},
                {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
                {"minItems", 1},
            };
            if (!inputs.parallel_tool_calls) {
                schema["maxItems"] = 1;
            }
            builder.add_rule("root", "\"<|tool_call_start|>\"" + builder.add_schema("tool_calls", schema) + "\"<|tool_call_end|>\"");
        });
        // the model has no concept of a tool selection mode choice:
        // if the system prompt rendered correctly, it will produce a tool call
        // the grammar goes inside the tool call body
        data.grammar_lazy = true;
        data.grammar_triggers = {{COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL, "\\s*<\\|tool_call_start\\|>\\s*\\["}};
        data.preserved_tokens = {"<|tool_call_start|>", "<|tool_call_end|>"};
        data.format = COMMON_CHAT_FORMAT_LFM2_WITH_JSON_TOOLS;
    } else if (are_tools_provided && (!is_json_schema_provided && !is_grammar_provided)) {
        LOG_INF("%s: Using tools without json schema or grammar\n", __func__);
        // output those tokens
        data.preserved_tokens = {"<|tool_call_start|>", "<|tool_call_end|>"};
    } else if (is_json_schema_provided) {
        LOG_INF("%s: Using provided json schema to build a grammar\n", __func__);
        data.grammar = json_schema_to_grammar(inputs.json_schema);
    } else if (is_grammar_provided) {
        LOG_INF("%s: Using provided grammar\n", __func__);
        data.grammar = inputs.grammar;
    } else {
        LOG_INF("%s: Using content relying on the template\n", __func__);
    }
    data.prompt = apply(tmpl, inputs, /* messages_override= */ tweaked_messages);
    LOG_DBG("%s: Prompt: %s\n", __func__, data.prompt.c_str());
    return data;
}
  1021. static common_chat_params common_chat_params_init_ministral_3(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1022. common_chat_params data;
  1023. // Build up messages to follow the format: https://huggingface.co/mistralai/Ministral-3-14B-Reasoning-2512/blob/main/chat_template.jinja
  1024. auto adjusted_messages = json::array();
  1025. for (const auto & msg : inputs.messages) {
  1026. auto role = msg.value("role", "");
  1027. if (role != "system" && role != "assistant") {
  1028. // Only adjust system and assistant messages. Interestingly, the system message may contain thinking.
  1029. adjusted_messages.push_back(msg);
  1030. continue;
  1031. }
  1032. auto content = json::array();
  1033. // If message contains `reasoning_content`, add it as a block of type `thinking`
  1034. if (msg.contains("reasoning_content") && msg.at("reasoning_content").is_string()) {
  1035. content.push_back({
  1036. {"type", "thinking"},
  1037. {"thinking", msg.at("reasoning_content").get<std::string>()},
  1038. });
  1039. }
  1040. // If message contains `content`, add it as a block of type `text`
  1041. if (msg.contains("content")) {
  1042. if (msg.at("content").is_string()) {
  1043. content.push_back({
  1044. {"type", "text"},
  1045. {"text", msg.at("content").get<std::string>()},
  1046. });
  1047. } else if (msg.at("content").is_array()) {
  1048. auto blocks = msg.at("content");
  1049. content.insert(content.end(), blocks.begin(), blocks.end());
  1050. }
  1051. }
  1052. auto adjusted = msg;
  1053. adjusted["content"] = content;
  1054. adjusted.erase("reasoning_content");
  1055. adjusted_messages.push_back(adjusted);
  1056. }
  1057. auto has_tools = inputs.tools.is_array() && !inputs.tools.empty();
  1058. auto extract_reasoning = inputs.reasoning_format != COMMON_REASONING_FORMAT_NONE;
  1059. auto include_grammar = true;
  1060. data.prompt = apply(tmpl, inputs, /* messages_override = */ adjusted_messages);
  1061. data.format = COMMON_CHAT_FORMAT_PEG_NATIVE;
  1062. data.preserved_tokens = {
  1063. "[THINK]",
  1064. "[/THINK]",
  1065. "[TOOL_CALLS]",
  1066. "[ARGS]",
  1067. };
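// Rough sketch of the assistant output this parser handles (placeholders in <>):
//   [THINK]<reasoning>[/THINK]<content>[TOOL_CALLS]<tool name>[ARGS]{ <json arguments> }
// or, when a response JSON schema was requested:
//   [THINK]<reasoning>[/THINK]```json
//   { <json matching the schema> }
//   ```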
  1068. auto parser = build_chat_peg_native_parser([&](common_chat_peg_native_builder & p) {
  1069. auto reasoning = extract_reasoning ? p.optional("[THINK]" + p.reasoning(p.until("[/THINK]")) + "[/THINK]") : p.eps();
  1070. // Response format parser
  1071. if (inputs.json_schema.is_object() && !inputs.json_schema.empty()) {
  1072. // Ministral wants to emit json surrounded by code fences
  1073. return reasoning << "```json" << p.content(p.schema(p.json(), "response-format", inputs.json_schema)) << "```";
  1074. }
  1075. // Tool call parser
  1076. if (has_tools && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) {
  1077. auto tool_choice = p.choice();
  1078. foreach_function(inputs.tools, [&](const json & tool) {
  1079. const auto & function = tool.at("function");
  1080. std::string name = function.at("name");
  1081. const auto & schema = function.at("parameters");
  1082. tool_choice |= p.rule("tool-" + name,
  1083. p.tool_open(p.tool_name(p.literal(name)) + "[ARGS]")
  1084. + p.tool_args(p.schema(p.json(), "tool-" + name + "-schema", schema))
  1085. );
  1086. });
  1087. auto min_calls = inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_REQUIRED ? 1 : 0;
  1088. auto max_calls = inputs.parallel_tool_calls ? -1 : 1;
  1089. auto tool_calls = p.trigger_rule("tool-call", p.repeat("[TOOL_CALLS]" + tool_choice, min_calls, max_calls));
  1090. return reasoning << p.content(p.until("[TOOL_CALLS]")) << tool_calls;
  1091. }
  1092. // Content only parser
  1093. include_grammar = false;
  1094. return reasoning << p.content(p.rest());
  1095. });
  1096. data.parser = parser.save();
  1097. if (include_grammar) {
  1098. data.grammar_lazy = has_tools && inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_AUTO;
  1099. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1100. foreach_function(inputs.tools, [&](const json & tool) {
  1101. const auto & function = tool.at("function");
  1102. auto schema = function.at("parameters");
  1103. builder.resolve_refs(schema);
  1104. });
  1105. parser.build_grammar(builder, data.grammar_lazy);
  1106. });
  1107. data.grammar_triggers = {
  1108. {COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"}
  1109. };
  1110. }
  1111. return data;
  1112. }
  1113. static common_chat_params common_chat_params_init_magistral(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1114. common_chat_params data;
  1115. data.prompt = apply(tmpl, inputs);
  1116. data.format = COMMON_CHAT_FORMAT_MAGISTRAL;
  1117. data.preserved_tokens = {
  1118. "[THINK]",
  1119. "[/THINK]",
  1120. };
  1121. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1122. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1123. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1124. auto schemas = json::array();
  1125. foreach_function(inputs.tools, [&](const json & tool) {
  1126. const auto & function = tool.at("function");
  1127. schemas.push_back({
  1128. {"type", "object"},
  1129. {"properties", {
  1130. {"name", {
  1131. {"type", "string"},
  1132. {"const", function.at("name")},
  1133. }},
  1134. {"arguments", function.at("parameters")},
  1135. {"id", {
  1136. {"type", "string"},
  1137. {"pattern", "^[a-zA-Z0-9]{9}$"},
  1138. }},
  1139. }},
  1140. {"required", json::array({"name", "arguments", "id"})},
  1141. });
  1142. });
  1143. auto schema = json {
  1144. {"type", "array"},
  1145. {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
  1146. {"minItems", 1},
  1147. };
  1148. if (!inputs.parallel_tool_calls) {
  1149. schema["maxItems"] = 1;
  1150. }
  1151. builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema));
  1152. });
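// Illustrative tool-call output this grammar constrains (values are placeholders):
//   [TOOL_CALLS][{"name": "<tool>", "arguments": { ... }, "id": "abc123XYZ"}]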
  1153. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"});
  1154. data.preserved_tokens.push_back("[TOOL_CALLS]");
  1155. } else {
  1156. data.grammar_lazy = false;
  1157. if (!inputs.json_schema.is_null()) {
  1158. if (!inputs.grammar.empty()) {
  1159. throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
  1160. }
  1161. data.grammar = json_schema_to_grammar(inputs.json_schema);
  1162. } else {
  1163. data.grammar = inputs.grammar;
  1164. }
  1165. }
  1166. return data;
  1167. }
  1168. static common_chat_params common_chat_params_init_command_r7b(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1169. common_chat_params data;
  1170. auto adjusted_messages = json::array();
  1171. for (const auto & msg : inputs.messages) {
  1172. auto has_reasoning_content = msg.contains("reasoning_content") && msg.at("reasoning_content").is_string();
  1173. auto has_tool_calls = msg.contains("tool_calls") && msg.at("tool_calls").is_array();
  1174. if (has_reasoning_content && has_tool_calls) {
  1175. auto adjusted_message = msg;
  1176. adjusted_message["tool_plan"] = msg.at("reasoning_content");
  1177. adjusted_message.erase("reasoning_content");
  1178. adjusted_messages.push_back(adjusted_message);
  1179. } else {
  1180. adjusted_messages.push_back(msg);
  1181. }
  1182. }
  1183. data.prompt = apply(tmpl, inputs, /* messages_override= */ adjusted_messages);
  1184. data.format = COMMON_CHAT_FORMAT_COMMAND_R7B;
  1185. if (string_ends_with(data.prompt, "<|START_THINKING|>")) {
  1186. if (!inputs.enable_thinking) {
  1187. data.prompt += "<|END_THINKING|>";
  1188. } else {
  1189. data.thinking_forced_open = true;
  1190. }
  1191. } else if (!inputs.enable_thinking && string_ends_with(data.prompt, "<|CHATBOT_TOKEN|>")) {
  1192. data.prompt += "<|START_THINKING|><|END_THINKING|>";
  1193. }
  1194. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1195. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1196. auto schemas = json::array();
  1197. foreach_function(inputs.tools, [&](const json & tool) {
  1198. const auto & function = tool.at("function");
  1199. schemas.push_back({
  1200. {"type", "object"},
  1201. {"properties", {
  1202. {"tool_call_id", {
  1203. {"type", "string"},
  1204. // Command-R's template expects an integer string.
  1205. {"pattern", "^[0-9]{1,10}$"},
  1206. }},
  1207. {"tool_name", {
  1208. {"type", "string"},
  1209. {"const", function.at("name")},
  1210. }},
  1211. {"parameters", function.at("parameters")},
  1212. }},
  1213. {"required", json::array({"tool_call_id", "tool_name", "parameters"})},
  1214. });
  1215. });
  1216. auto schema = json {
  1217. {"type", "array"},
  1218. {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
  1219. {"minItems", 1},
  1220. };
  1221. if (!inputs.parallel_tool_calls) {
  1222. schema["maxItems"] = 1;
  1223. }
  1224. builder.add_rule("root",
  1225. std::string(data.thinking_forced_open ? "( \"<|END_THINKING|>\" space )? " : "") +
  1226. "\"<|START_ACTION|>\" " + builder.add_schema("tool_calls", schema) + " \"<|END_ACTION|>\"");
  1227. });
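// Illustrative action block this grammar constrains (values are placeholders):
//   <|START_ACTION|>[{"tool_call_id": "0", "tool_name": "<tool>", "parameters": { ... }}]<|END_ACTION|>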
  1228. data.grammar_triggers.push_back({
  1229. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
1230. // If thinking_forced_open, then we capture the <|END_THINKING|> tag in the grammar,
  1231. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1232. std::string(data.thinking_forced_open ? "[\\s\\S]*?(<\\|END_THINKING\\|>\\s*)" : "(?:<\\|START_THINKING\\|>[\\s\\S]*?<\\|END_THINKING\\|>\\s*)?") +
  1233. "(<\\|START_ACTION\\|>)[\\s\\S]*"
  1234. });
  1235. data.preserved_tokens = {
  1236. "<|START_ACTION|>",
  1237. "<|END_ACTION|>",
  1238. "<|START_RESPONSE|>",
  1239. "<|END_RESPONSE|>",
  1240. "<|START_THINKING|>",
  1241. "<|END_THINKING|>",
  1242. };
  1243. return data;
  1244. }
  1245. static void expect_tool_parameters(const std::string & name, const json & parameters, const std::vector<std::string> & expected_properties) {
  1246. if (!parameters.is_object() || !parameters.contains("type") || parameters.at("type") != "object" || !parameters.contains("properties") || !parameters.contains("required")) {
  1247. throw std::runtime_error("Parameters of tool " + name + " must be an object w/ required properties");
  1248. }
  1249. const auto & parameters_properties = parameters.at("properties");
  1250. const auto & parameters_required = parameters.at("required");
  1251. for (const auto & prop : expected_properties) {
  1252. if (!parameters_properties.contains(prop)) {
  1253. throw std::runtime_error("Parameters of tool " + name + " is missing property: " + prop); // NOLINT
  1254. }
  1255. if (std::find(parameters_required.begin(), parameters_required.end(), json(prop)) == parameters_required.end()) {
  1256. throw std::runtime_error("Parameters of tool " + name + " must have property marked as required: " + prop); // NOLINT
  1257. }
  1258. }
  1259. if (parameters_properties.size() != expected_properties.size()) {
  1260. throw std::runtime_error("Parameters of tool " + name + " must only have these properties:" + string_join(expected_properties, ", "));
  1261. }
  1262. }
  1263. static common_chat_params common_chat_params_init_llama_3_x(const common_chat_template & tmpl, const struct templates_params & inputs, bool allow_python_tag_builtin_tools) {
  1264. auto builtin_tools = json::array();
  1265. common_chat_params data;
  1266. if (!inputs.tools.is_null()) {
  1267. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1268. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1269. std::vector<std::string> tool_rules;
  1270. auto handle_builtin_tool = [&](const std::string & name, const json & parameters) {
  1271. if (name == "wolfram_alpha" || name == "web_search" || name == "brave_search") {
  1272. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py
  1273. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py
  1274. expect_tool_parameters(name, parameters, {"query"});
  1275. } else if (name == "python" || name == "code_interpreter") {
  1276. // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py
  1277. expect_tool_parameters(name, parameters, {"code"});
  1278. } else {
  1279. return false;
  1280. }
  1281. std::vector<std::string> kvs;
  1282. for (const auto & [key, value] : parameters.at("properties").items()) {
  1283. kvs.push_back("\"" + key + "=\" " + builder.add_schema(name + "-args-" + key, value)); // NOLINT
  1284. }
  1285. tool_rules.push_back(
  1286. builder.add_rule(
  1287. name + "-call",
  1288. "\"<|python_tag|>" + name + ".call(\" " + string_join(kvs, " \", \" ") + " \")\""));
  1289. builtin_tools.push_back(name);
  1290. return true;
  1291. };
  1292. foreach_function(inputs.tools, [&](const json & tool) {
  1293. const auto & function = tool.at("function");
  1294. std::string name = function.at("name");
  1295. auto parameters = function.at("parameters");
  1296. builder.resolve_refs(parameters);
  1297. // https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/remote/tool_runtime
  1298. if (allow_python_tag_builtin_tools) {
  1299. handle_builtin_tool(name, parameters);
  1300. }
  1301. tool_rules.push_back(
  1302. builder.add_rule(
  1303. name + "-call",
  1304. "\"{\" space "
  1305. "( \"\\\"type\\\"\" space \":\" space \"\\\"function\\\"\" space \",\" space )? "
  1306. " \"\\\"name\\\"\" space \":\" space \"\\\"" + name + "\\\"\" space \",\" space "
  1307. " \"\\\"parameters\\\"\" space \":\" space " + builder.add_schema(name + "-args", parameters) + " "
  1308. "\"}\" space"));
  1309. });
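// Illustrative shapes accepted by the rules above (placeholders in <>):
//   {"type": "function", "name": "<tool>", "parameters": { ... }}   ("type" is optional)
//   <|python_tag|><builtin>.call(<key>=<value>)                     (builtin tools only)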
  1310. // Small models may hallucinate function names so we match anything (*at the start*) that looks like the JSON of a function call, regardless of the name.
  1311. data.grammar_triggers.push_back({
  1312. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1313. "(\\{\\s*(?:\"type\"\\s*:\\s*\"function\"\\s*,\\s*)?\"name\"\\s*:\\s*\")[\\s\\S]*", // + name + "\"[\\s\\S]*",
  1314. });
  1315. if (!builtin_tools.empty()) {
  1316. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
  1317. data.preserved_tokens.push_back("<|python_tag|>");
  1318. }
  1319. // Allow a few empty lines on top of the usual constrained json schema space rule.
  1320. builder.add_rule("root", string_join(tool_rules, " | "));
  1321. data.additional_stops.push_back("<|eom_id|>");
  1322. });
  1323. data.format = allow_python_tag_builtin_tools && !builtin_tools.empty()
  1324. ? COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS
  1325. : COMMON_CHAT_FORMAT_LLAMA_3_X;
  1326. } else {
  1327. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  1328. }
  1329. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, /* tools_override= */ std::nullopt, json {
  1330. {"date_string", format_time(inputs.now, "%d %b %Y")},
  1331. {"tools_in_user_message", false},
  1332. {"builtin_tools", builtin_tools},
  1333. });
  1334. return data;
  1335. }
  1336. static common_chat_params common_chat_params_init_nemotron_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1337. common_chat_params data;
  1338. // Generate the prompt using the apply() function with the template
  1339. data.prompt = apply(tmpl, inputs);
  1340. data.format = COMMON_CHAT_FORMAT_NEMOTRON_V2;
  1341. // Handle thinking tags appropriately based on inputs.enable_thinking
  1342. if (string_ends_with(data.prompt, "<think>\n")) {
  1343. if (!inputs.enable_thinking) {
  1344. data.prompt += "</think>";
  1345. } else {
  1346. data.thinking_forced_open = true;
  1347. }
  1348. }
  1349. // When tools are present, build grammar for the <TOOLCALL> format, similar to CommandR, but without tool call ID
  1350. if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
  1351. data.grammar_lazy = true;
  1352. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1353. auto schemas = json::array();
  1354. foreach_function(inputs.tools, [&](const json & tool) {
  1355. const auto & function = tool.at("function");
  1356. schemas.push_back({
  1357. { "type", "object" },
  1358. { "properties",
  1359. {
  1360. { "name",
  1361. {
  1362. { "type", "string" },
  1363. { "const", function.at("name") },
  1364. } },
  1365. { "arguments", function.at("parameters") },
  1366. } },
  1367. { "required", json::array({ "name", "arguments" }) },
  1368. });
  1369. });
  1370. auto schema = json{
  1371. { "type", "array" },
  1372. { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
  1373. { "minItems", 1 },
  1374. };
  1375. if (!inputs.parallel_tool_calls) {
  1376. schema["maxItems"] = 1;
  1377. }
  1378. builder.add_rule("root",
  1379. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1380. "\"<TOOLCALL>\" " + builder.add_schema("tool_calls", schema) +
  1381. " \"</TOOLCALL>\"");
  1382. });
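// Illustrative tool-call block this grammar constrains (values are placeholders):
//   <TOOLCALL>[{"name": "<tool>", "arguments": { ... }}]</TOOLCALL>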
  1383. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1384. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1385. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1386. std::string(data.thinking_forced_open ?
  1387. "[\\s\\S]*?(</think>\\s*)" :
  1388. "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1389. "(<TOOLCALL>)[\\s\\S]*" });
  1390. }
  1391. return data;
  1392. }
  1393. static common_chat_params common_chat_params_init_nemotron_v3(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1394. common_chat_params data;
  1395. data.prompt = apply(tmpl, inputs);
  1396. data.format = COMMON_CHAT_FORMAT_PEG_CONSTRUCTED;
  1397. // Handle thinking tags appropriately based on inputs.enable_thinking
  1398. if (string_ends_with(data.prompt, "<think>\n")) {
  1399. if (!inputs.enable_thinking) {
  1400. data.prompt += "</think>";
  1401. } else {
  1402. data.thinking_forced_open = true;
  1403. }
  1404. }
  1405. data.preserved_tokens = {
  1406. "<think>",
  1407. "</think>",
  1408. "<tool_call>",
  1409. "</tool_call>",
  1410. };
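// Rough sketch of a single tool call this parser expects (placeholders in <>;
// string-valued parameters are emitted raw, other types as JSON):
//   <tool_call>
//   <function=<tool>>
//   <parameter=<key>>
//   <value>
//   </parameter>
//   </function>
//   </tool_call>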
  1411. auto has_tools = inputs.tools.is_array() && !inputs.tools.empty();
  1412. auto extract_reasoning = inputs.reasoning_format != COMMON_REASONING_FORMAT_NONE;
  1413. auto include_grammar = true;
  1414. auto parser = build_chat_peg_constructed_parser([&](auto & p) {
  1415. auto reasoning = p.eps();
  1416. if (inputs.enable_thinking && extract_reasoning) {
  1417. auto reasoning_content = p.reasoning(p.until("</think>")) + ("</think>" | p.end());
  1418. if (data.thinking_forced_open) {
  1419. reasoning = reasoning_content;
  1420. }
  1421. }
  1422. // Response format parser
  1423. if (inputs.json_schema.is_object() && !inputs.json_schema.empty()) {
  1424. return reasoning << p.content(p.schema(p.json(), "response-format", inputs.json_schema));
  1425. }
  1426. // Tool call parser
  1427. if (has_tools && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) {
  1428. auto tool_choice = p.choice();
  1429. foreach_function(inputs.tools, [&](const json & tool) {
  1430. const auto & function = tool.at("function");
  1431. std::string name = function.at("name");
  1432. auto parameters = function.at("parameters");
  1433. auto schema_info = common_schema_info();
  1434. schema_info.resolve_refs(parameters);
  1435. auto tool_open = "<function=" + p.tool_name(p.literal(name)) + ">\n";
  1436. auto tool_close = p.literal("</function>\n");
  1437. auto args = p.sequence();
  1438. auto arg_string = p.rule("xml-arg-string", p.until_one_of({
  1439. "\n</parameter>",
  1440. "\n<parameter=",
  1441. "\n</function>"
  1442. }));
  1443. foreach_parameter(function, [&](const auto & param_name, const json & param_schema, bool is_required) {
  1444. auto rule_name = "tool-" + name + "-arg-" + param_name;
  1445. auto arg_open = "<parameter=" + p.tool_arg_name(p.literal(param_name)) + ">\n";
  1446. auto arg_close = p.literal("</parameter>\n");
  1447. auto arg_value = p.eps();
  1448. if (schema_info.resolves_to_string(param_schema)) {
  1449. arg_value = p.tool_arg_string_value(arg_string) + "\n";
  1450. } else {
  1451. arg_value = p.tool_arg_json_value(p.schema(p.json(), rule_name + "-schema", param_schema));
  1452. }
1453. // Model may or may not close with </parameter>
  1454. auto arg_rule = p.rule(rule_name, p.tool_arg_open(arg_open) + arg_value + p.optional(p.tool_arg_close(arg_close)));
  1455. args += p.repeat(arg_rule, /* min = */ is_required ? 1 : 0, /* max = */ 1);
  1456. });
  1457. tool_choice |= p.rule("tool-" + name, p.tool_open(tool_open) + args + p.tool_close(tool_close));
  1458. });
  1459. auto min_calls = inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_REQUIRED ? 1 : 0;
  1460. auto max_calls = inputs.parallel_tool_calls ? -1 : 1;
  1461. auto tool_call = p.rule("tool-call", "<tool_call>\n" + tool_choice + "</tool_call>" + p.space());
  1462. auto tool_calls = p.trigger_rule("tool-call-root", p.repeat(tool_call, /* min = */ min_calls, /* max = */ max_calls));
  1463. return reasoning << p.content(p.until("<tool_call>")) << tool_calls;
  1464. }
  1465. // Content only parser
  1466. include_grammar = false;
  1467. return reasoning << p.content(p.rest());
  1468. });
  1469. data.parser = parser.save();
  1470. if (include_grammar) {
  1471. data.grammar_lazy = has_tools && inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_AUTO;
  1472. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1473. foreach_function(inputs.tools, [&](const json & tool) {
  1474. const auto & function = tool.at("function");
  1475. auto schema = function.at("parameters");
  1476. builder.resolve_refs(schema);
  1477. });
  1478. parser.build_grammar(builder, data.grammar_lazy);
  1479. });
  1480. data.grammar_triggers = {
  1481. {COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<tool_call>"}
  1482. };
  1483. }
  1484. return data;
  1485. }
  1486. static common_chat_params common_chat_params_init_apertus(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1487. common_chat_params data;
  1488. // Generate the prompt using the apply() function with the template
  1489. data.prompt = apply(tmpl, inputs);
  1490. data.format = COMMON_CHAT_FORMAT_APERTUS;
  1491. // Handle thinking tags appropriately based on inputs.enable_thinking
  1492. if (string_ends_with(data.prompt, "<|inner_prefix|>")) {
  1493. if (!inputs.enable_thinking) {
  1494. data.prompt += "<|inner_suffix|>";
  1495. } else {
  1496. data.thinking_forced_open = true;
  1497. }
  1498. }
  1499. // When tools are present, build grammar for the <|tools_prefix|> format
  1500. if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
  1501. data.grammar_lazy = true;
  1502. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1503. auto schemas = json::array();
  1504. foreach_function(inputs.tools, [&](const json & tool) {
  1505. const auto & function = tool.at("function");
  1506. schemas.push_back({
  1507. { "type", "object" },
  1508. { "properties",
  1509. {
  1510. { function.at("name"), function.at("parameters") }
  1511. } },
  1512. { "required", json::array({ function.at("name") }) },
  1513. });
  1514. });
  1515. auto schema = json{
  1516. { "type", "array" },
  1517. { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
  1518. { "minItems", 1 },
  1519. };
  1520. if (!inputs.parallel_tool_calls) {
  1521. schema["maxItems"] = 1;
  1522. }
  1523. builder.add_rule("root",
  1524. std::string(data.thinking_forced_open ? "( \"<|inner_suffix|>\" space )? " : "") +
  1525. "\"<|tools_prefix|>\"" + builder.add_schema("tool_calls", schema) + "\"<|tools_suffix|>\"");
  1526. });
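// Illustrative tool-call block this grammar constrains (values are placeholders;
// note the function name is used as the object's key):
//   <|tools_prefix|>[{"<tool>": { <arguments> }}]<|tools_suffix|>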
  1527. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1528. // If thinking_forced_open, then we capture the <|inner_suffix|> tag in the grammar,
  1529. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1530. std::string(data.thinking_forced_open ?
  1531. "[\\s\\S]*?(<\\|inner_suffix\\|>\\s*)" :
  1532. "(?:<\\|inner_prefix\\|>[\\s\\S]*?<\\|inner_suffix\\|>\\s*)?") +
  1533. "(<\\|tools_prefix\\|>)[\\s\\S]*" });
  1534. data.preserved_tokens = {
  1535. "<|system_start|>",
  1536. "<|system_end|>",
  1537. "<|developer_start|>",
  1538. "<|developer_end|>",
  1539. "<|user_start|>",
  1540. "<|user_end|>",
  1541. "<|assistant_start|>",
  1542. "<|assistant_end|>",
  1543. "<|inner_prefix|>",
  1544. "<|inner_suffix|>",
  1545. "<|tools_prefix|>",
  1546. "<|tools_suffix|>",
  1547. };
  1548. }
  1549. return data;
  1550. }
  1551. static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1552. common_chat_params data;
  1553. auto prompt = apply(tmpl, inputs);
  1554. // Hacks to fix the official (broken) prompt.
  1555. // It is advisable to use --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja instead,
  1556. // until the official template is fixed.
  1557. if (tmpl.source().find("{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}") != std::string::npos) {
  1558. // Don't leave the chat dangling after tool results
  1559. if (string_ends_with(prompt, "<|tool▁outputs▁end|>")) {
  1560. prompt += "<|end▁of▁sentence|>";
  1561. if (inputs.add_generation_prompt) {
  1562. prompt += "<|Assistant|>";
  1563. }
  1564. }
  1565. // Fix up tool call delta example added by Minja
  1566. prompt = std::regex_replace(
  1567. prompt,
  1568. std::regex("(<|tool▁call▁end|>)[\\s\\r\\n]*(<|tool▁outputs▁begin|>|<|User|>)"),
  1569. "$1<|tool▁calls▁end|><|end▁of▁sentence|>$2");
  1570. }
  1571. data.prompt = prompt;
  1572. data.format = COMMON_CHAT_FORMAT_DEEPSEEK_R1;
  1573. if (string_ends_with(data.prompt, "<think>\n")) {
  1574. if (!inputs.enable_thinking) {
  1575. data.prompt += "</think>";
  1576. } else {
  1577. data.thinking_forced_open = true;
  1578. }
  1579. }
  1580. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1581. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
  1582. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1583. std::vector<std::string> tool_rules;
  1584. foreach_function(inputs.tools, [&](const json & tool) {
  1585. const auto & function = tool.at("function");
  1586. std::string name = function.at("name");
  1587. auto parameters = function.at("parameters");
  1588. builder.resolve_refs(parameters);
  1589. tool_rules.push_back(builder.add_rule(name + "-call",
  1590. "( \"<|tool▁call▁begin|>\" )? \"function<|tool▁sep|>" + name + "\\n"
  1591. "```json\\n\" " + builder.add_schema(name + "-args", parameters) + " "
  1592. "\"```<|tool▁call▁end|>\""));
  1593. });
1594. // Distill Qwen 7B & 32B models seem confused about the syntax of their tool call opening tag,
  1595. // so we accept common variants (then it's all constrained)
  1596. builder.add_rule("root",
  1597. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1598. "( \"<|tool▁calls▁begin|>\" | \"<|tool_calls_begin|>\" | \"<|tool calls begin|>\" | \"<|tool\\\\_calls\\\\_begin|>\" | \"<|tool▁calls|>\" ) "
  1599. "(" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " "
  1600. "\"<|tool▁calls▁end|>\""
  1601. " space");
  1602. data.grammar_triggers.push_back({
  1603. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1604. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1605. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1606. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1607. "(<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>|<|tool▁calls|>)[\\s\\S]*"
  1608. });
  1609. data.preserved_tokens = {
  1610. "<think>",
  1611. "</think>",
  1612. "<|tool▁calls▁begin|>",
  1613. "<|tool▁call▁begin|>",
  1614. "<|tool▁sep|>",
  1615. "<|tool▁call▁end|>",
  1616. "<|tool▁calls▁end|",
  1617. };
  1618. });
  1619. }
  1620. return data;
  1621. }
  1622. static common_chat_params common_chat_params_init_deepseek_v3_1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1623. common_chat_params data;
  1624. // Pass thinking context for DeepSeek V3.1 template
  1625. json additional_context = {
  1626. {"thinking", inputs.enable_thinking},
  1627. };
  1628. auto prompt = apply(tmpl, inputs,
  1629. /* messages_override= */ inputs.messages,
  1630. /* tools_override= */ std::nullopt,
  1631. additional_context);
  1632. data.prompt = prompt;
  1633. data.format = COMMON_CHAT_FORMAT_DEEPSEEK_V3_1;
  1634. if (string_ends_with(data.prompt, "<think>")) {
  1635. if (!inputs.enable_thinking) {
  1636. data.prompt += "</think>";
  1637. } else {
  1638. data.thinking_forced_open = true;
  1639. }
  1640. }
  1641. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1642. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
  1643. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1644. std::vector<std::string> tool_rules;
  1645. foreach_function(inputs.tools, [&](const json & tool) {
  1646. const auto & function = tool.at("function");
  1647. std::string name = function.at("name");
  1648. auto parameters = function.at("parameters");
  1649. builder.resolve_refs(parameters);
  1650. tool_rules.push_back(builder.add_rule(name + "-call",
  1651. "( \"<|tool▁call▁begin|>\" )? \"" + name + "<|tool▁sep|>"
  1652. "\" " + builder.add_schema(name + "-args", parameters) + " "
  1653. "\"<|tool▁call▁end|>\""));
  1654. });
1655. // Some models are confused about the exact syntax of their tool call opening tag,
1656. // so we accept common variants (then it's all constrained)
  1657. builder.add_rule("root",
  1658. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  1659. "( \"<|tool▁calls▁begin|>\" | \"<|tool_calls_begin|>\" | \"<|tool calls begin|>\" | \"<|tool\\\\_calls\\\\_begin|>\" | \"<|tool▁calls|>\" ) "
  1660. "(" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " "
  1661. "\"<|tool▁calls▁end|>\""
  1662. " space");
  1663. data.grammar_triggers.push_back({
  1664. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1665. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  1666. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  1667. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") +
  1668. "(<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>|<|tool▁calls|>)[\\s\\S]*"
  1669. });
  1670. data.preserved_tokens = {
  1671. "<think>",
  1672. "</think>",
  1673. "<|tool▁calls▁begin|>",
  1674. "<|tool▁call▁begin|>",
  1675. "<|tool▁sep|>",
  1676. "<|tool▁call▁end|>",
  1677. "<|tool▁calls▁end|>",
  1678. };
  1679. });
  1680. }
  1681. return data;
  1682. }
  1683. static common_chat_params common_chat_params_init_minimax_m2(const common_chat_template & tmpl, const struct templates_params & params) {
  1684. common_chat_params data;
  1685. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1686. data.prompt = apply(tmpl, params);
  1687. data.format = COMMON_CHAT_FORMAT_MINIMAX_M2;
  1688. // Handle thinking tags based on prompt ending
  1689. if (string_ends_with(data.prompt, "<think>\n")) {
  1690. if (!params.enable_thinking) {
  1691. // Close the thinking tag immediately if thinking is disabled
  1692. data.prompt += "</think>\n\n";
  1693. } else {
  1694. // Mark thinking as forced open (template started with <think>)
  1695. data.thinking_forced_open = true;
  1696. }
  1697. }
  1698. // Preserve MiniMax-M2 special tokens
  1699. data.preserved_tokens = {
  1700. "<think>",
  1701. "</think>",
  1702. "<minimax:tool_call>",
  1703. "</minimax:tool_call>",
  1704. };
  1705. // build grammar for tool call
  1706. static const xml_tool_call_format form {
  1707. /* form.scope_start = */ "<minimax:tool_call>\n",
  1708. /* form.tool_start = */ "<invoke name=\"",
  1709. /* form.tool_sep = */ "\">\n",
  1710. /* form.key_start = */ "<parameter name=\"",
  1711. /* form.key_val_sep = */ "\">",
  1712. /* form.val_end = */ "</parameter>\n",
  1713. /* form.tool_end = */ "</invoke>\n",
  1714. /* form.scope_end = */ "</minimax:tool_call>",
  1715. };
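// Concatenating the fields above, a single call renders roughly as (placeholders in <>):
//   <minimax:tool_call>
//   <invoke name="<tool>">
//   <parameter name="<key>"><value></parameter>
//   </invoke>
//   </minimax:tool_call>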
  1716. build_grammar_xml_tool_call(data, params.tools, form);
  1717. return data;
  1718. }
  1719. static common_chat_params common_chat_params_init_qwen3_coder_xml(const common_chat_template & tmpl, const struct templates_params & params) {
  1720. common_chat_params data;
  1721. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1722. data.prompt = apply(tmpl, params);
  1723. data.format = COMMON_CHAT_FORMAT_QWEN3_CODER_XML;
  1724. data.preserved_tokens = {
  1725. "<tool_call>",
  1726. "</tool_call>",
  1727. "<function=",
  1728. "</function>",
  1729. "<parameter=",
  1730. "</parameter>",
  1731. };
  1732. // build grammar for tool call
  1733. static const xml_tool_call_format form {
  1734. /* form.scope_start = */ "<tool_call>\n",
  1735. /* form.tool_start = */ "<function=",
  1736. /* form.tool_sep = */ ">\n",
  1737. /* form.key_start = */ "<parameter=",
  1738. /* form.key_val_sep = */ ">\n",
  1739. /* form.val_end = */ "\n</parameter>\n",
  1740. /* form.tool_end = */ "</function>\n",
  1741. /* form.scope_end = */ "</tool_call>",
  1742. };
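// Concatenating the fields above, a single call renders roughly as (placeholders in <>):
//   <tool_call>
//   <function=<tool>>
//   <parameter=<key>>
//   <value>
//   </parameter>
//   </function>
//   </tool_call>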
  1743. build_grammar_xml_tool_call(data, params.tools, form);
  1744. return data;
  1745. }
  1746. static common_chat_params common_chat_params_init_kimi_k2(const common_chat_template & tmpl, const struct templates_params & params) {
  1747. common_chat_params data;
  1748. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1749. data.prompt = apply(tmpl, params);
  1750. data.format = COMMON_CHAT_FORMAT_KIMI_K2;
  1751. data.preserved_tokens = {
  1752. "<think>",
  1753. "</think>",
  1754. "<|tool_calls_section_begin|>",
  1755. "<|tool_call_begin|>",
  1756. "<|tool_call_argument_begin|>",
  1757. "<|tool_call_end|>",
  1758. "<|tool_calls_section_end|>",
  1759. "<|im_end|>",
  1760. "<|im_system|>",
  1761. "<|im_middle|>",
  1762. };
  1763. data.additional_stops.insert(data.additional_stops.end(), {
  1764. "<|im_end|>",
  1765. "<|im_middle|>"
  1766. });
  1767. // build grammar for tool call
  1768. static const xml_tool_call_format form = ([]() {
  1769. xml_tool_call_format form {};
  1770. form.scope_start = "<|tool_calls_section_begin|>";
  1771. form.tool_start = "<|tool_call_begin|>";
  1772. form.tool_sep = "<|tool_call_argument_begin|>{";
  1773. form.key_start = "\"";
  1774. form.key_val_sep = "\": ";
  1775. form.val_end = ", ";
  1776. form.tool_end = "}<|tool_call_end|>";
  1777. form.scope_end = "<|tool_calls_section_end|>";
  1778. form.raw_argval = false;
  1779. form.last_val_end = "";
  1780. return form;
  1781. })();
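// Concatenating the fields above, a single call renders roughly as (placeholders in <>;
// argument values are JSON since raw_argval is false):
//   <|tool_calls_section_begin|><|tool_call_begin|><tool><|tool_call_argument_begin|>{"<key>": <value>}<|tool_call_end|><|tool_calls_section_end|>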
  1782. build_grammar_xml_tool_call(data, params.tools, form);
  1783. return data;
  1784. }
  1785. static common_chat_params common_chat_params_init_apriel_1_5(const common_chat_template & tmpl, const struct templates_params & params) {
  1786. common_chat_params data;
  1787. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1788. data.prompt = apply(tmpl, params);
  1789. data.format = COMMON_CHAT_FORMAT_APRIEL_1_5;
  1790. data.preserved_tokens = {
  1791. "<thinking>",
  1792. "</thinking>",
  1793. "<tool_calls>",
  1794. "</tool_calls>",
  1795. };
  1796. // build grammar for tool call
  1797. static const xml_tool_call_format form = ([]() {
  1798. xml_tool_call_format form {};
  1799. form.scope_start = "<tool_calls>[";
  1800. form.tool_start = "{\"name\": \"";
  1801. form.tool_sep = "\", \"arguments\": {";
  1802. form.key_start = "\"";
  1803. form.key_val_sep = "\": ";
  1804. form.val_end = ", ";
  1805. form.tool_end = "}, ";
  1806. form.scope_end = "]</tool_calls>";
  1807. form.raw_argval = false;
  1808. form.last_val_end = "";
  1809. form.last_tool_end = "}";
  1810. return form;
  1811. })();
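// Roughly, the intended shape is a JSON list wrapped in <tool_calls> tags
// (placeholders in <>; exact whitespace/brace handling is left to the helper):
//   <tool_calls>[{"name": "<tool>", "arguments": {"<key>": <value>}}]</tool_calls>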
  1812. build_grammar_xml_tool_call(data, params.tools, form);
  1813. return data;
  1814. }
  1815. static common_chat_params common_chat_params_init_xiaomi_mimo(const common_chat_template & tmpl, const struct templates_params & params) {
  1816. common_chat_params data;
  1817. data.grammar_lazy = params.tools.is_array() && !params.tools.empty() && params.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1818. data.prompt = apply(tmpl, params);
  1819. data.format = COMMON_CHAT_FORMAT_XIAOMI_MIMO;
  1820. data.preserved_tokens = {
  1821. "<tool_call>",
  1822. "</tool_call>",
  1823. };
  1824. // build grammar for tool call
  1825. static const xml_tool_call_format form = ([]() {
  1826. xml_tool_call_format form {};
  1827. form.scope_start = "\n";
  1828. form.tool_start = "<tool_call>\n{\"name\": \"";
  1829. form.tool_sep = "\", \"arguments\": {";
  1830. form.key_start = "\"";
  1831. form.key_val_sep = "\": ";
  1832. form.val_end = ", ";
  1833. form.tool_end = "}\n</tool_call>";
  1834. form.scope_end = "";
  1835. form.raw_argval = false;
  1836. form.last_val_end = "";
  1837. return form;
  1838. })();
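// Roughly, the intended shape is a JSON object wrapped in <tool_call> tags
// (placeholders in <>; exact whitespace/brace handling is left to the helper):
//   <tool_call>
//   {"name": "<tool>", "arguments": {"<key>": <value>}}
//   </tool_call>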
  1839. build_grammar_xml_tool_call(data, params.tools, form);
  1840. return data;
  1841. }
  1842. static common_chat_params common_chat_params_init_gpt_oss(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1843. common_chat_params data;
  1844. // Copy reasoning to the "thinking" field as expected by the gpt-oss template
  1845. auto adjusted_messages = json::array();
  1846. for (const auto & msg : inputs.messages) {
  1847. auto has_reasoning_content = msg.contains("reasoning_content") && msg.at("reasoning_content").is_string();
  1848. auto has_tool_calls = msg.contains("tool_calls") && msg.at("tool_calls").is_array();
  1849. if (has_reasoning_content && has_tool_calls) {
  1850. auto adjusted_message = msg;
  1851. adjusted_message["thinking"] = msg.at("reasoning_content");
  1852. adjusted_messages.push_back(adjusted_message);
  1853. } else {
  1854. adjusted_messages.push_back(msg);
  1855. }
  1856. }
  1857. auto prompt = apply(tmpl, inputs, /* messages_override= */ adjusted_messages);
1858. // Check whether we need to replace the return token with the end token when
1859. // running inference without a generation prompt. For more details see:
  1860. // https://github.com/ggml-org/llama.cpp/issues/15417
  1861. if (inputs.is_inference && !inputs.add_generation_prompt) {
  1862. static constexpr std::string_view return_token = "<|return|>";
  1863. static constexpr std::string_view end_token = "<|end|>";
  1864. if (size_t pos = prompt.rfind(return_token); pos != std::string::npos) {
  1865. prompt.replace(pos, return_token.length(), end_token);
  1866. }
  1867. }
  1868. data.prompt = prompt;
  1869. data.format = COMMON_CHAT_FORMAT_GPT_OSS;
  1870. // These special tokens are required to parse properly, so we include them
  1871. // even if parse_tool_calls is false.
  1872. data.preserved_tokens = {
  1873. "<|channel|>",
  1874. "<|constrain|>",
  1875. "<|message|>",
  1876. "<|start|>",
  1877. "<|end|>",
  1878. };
  1879. if (!inputs.json_schema.is_null()) {
  1880. data.grammar_lazy = false;
  1881. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1882. auto schema = inputs.json_schema;
  1883. builder.resolve_refs(schema);
  1884. auto not_end = builder.add_rule("not-end",
  1885. "[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
  1886. auto analysis = builder.add_rule("analysis",
  1887. "\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1888. auto constraint = builder.add_rule("constraint", "\"<|constrain|>\"? [a-zA-Z0-9_-]+");
  1889. auto final = builder.add_rule("final",
  1890. "\"<|channel|>final\" ( \" \" " + constraint + " )? \"<|message|>\" " +
  1891. builder.add_schema("response", schema)
  1892. );
  1893. builder.add_rule("root", "( " + analysis + " \"<|start|>assistant\" )? " + final);
  1894. });
  1895. }
  1896. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1897. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1898. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1899. // tool calls can appear in commentary or analysis channels
  1900. auto channel = builder.add_rule("channel", "\"<|channel|>\" ( \"commentary\" | \"analysis\" )");
  1901. std::vector<std::string> tool_rules_recipient_in_role;
  1902. std::vector<std::string> tool_rules_recipient_in_channel;
  1903. foreach_function(inputs.tools, [&](const json & tool) {
  1904. const auto & function = tool.at("function");
  1905. std::string name = function.at("name");
  1906. auto parameters = function.at("parameters");
  1907. builder.resolve_refs(parameters);
  1908. tool_rules_recipient_in_role.push_back(
  1909. builder.add_rule(name + "-call",
  1910. "\"" + name + "\"" + channel + " \" <|constrain|>json\"? \"<|message|>\" " +
  1911. builder.add_schema(name + "-args", parameters)
  1912. )
  1913. );
  1914. tool_rules_recipient_in_channel.push_back(
  1915. builder.add_rule(name + "-call",
  1916. "\"" + name + "\"" + " \" <|constrain|>json\"? \"<|message|>\" " +
  1917. builder.add_schema(name + "-args", parameters)
  1918. )
  1919. );
  1920. });
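// Illustrative tool-call shapes accepted below (placeholders in <>; "commentary" may also be "analysis"):
//   recipient in role:    <|start|>assistant to=functions.<tool><|channel|>commentary <|constrain|>json<|message|>{ <arguments> }
//   recipient in channel: <|channel|>commentary to=functions.<tool> <|constrain|>json<|message|>{ <arguments> }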
  1921. auto recipient_in_channel = builder.add_rule("recipient_in_channel",
  1922. channel + " \" to=functions.\" ( " +
  1923. string_join(tool_rules_recipient_in_channel, " | ") + " )"
  1924. );
  1925. if (data.grammar_lazy) {
  1926. auto recipient_in_role = builder.add_rule("recipient_in_role",
  1927. "\"<|start|>assistant\"? \" to=functions.\" ( " +
  1928. string_join(tool_rules_recipient_in_role, " | ") + " )"
  1929. );
  1930. builder.add_rule("root", recipient_in_role + " | " + recipient_in_channel);
  1931. } else {
  1932. auto not_end = builder.add_rule("not-end",
  1933. "[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
  1934. auto analysis = builder.add_rule("analysis",
  1935. "\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1936. auto commentary = builder.add_rule("commentary",
  1937. "\"<|channel|>commentary<|message|>\" ( " + not_end + " )* \"<|end|>\"");
  1938. auto recipient_in_role = builder.add_rule("recipient_in_role",
  1939. "\" to=functions.\" ( " + string_join(tool_rules_recipient_in_role, " | ") + " )"
  1940. );
  1941. builder.add_rule("root",
  1942. "( " + analysis + " \"<|start|>assistant\" )? " +
  1943. "( " + commentary + " \"<|start|>assistant\" )? " +
  1944. "( " + recipient_in_role + " | " + recipient_in_channel + " )"
  1945. );
  1946. }
  1947. // Trigger on tool calls that appear in the commentary channel
  1948. data.grammar_triggers.push_back({
  1949. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  1950. "<\\|channel\\|>(?:commentary|analysis) to"
  1951. });
  1952. // Trigger tool calls that appear in the role section, either at the
  1953. // start or in the middle.
  1954. data.grammar_triggers.push_back({
  1955. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  1956. "^ to"
  1957. });
  1958. data.grammar_triggers.push_back({
  1959. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  1960. "<\\|start\\|>assistant to"
  1961. });
  1962. });
  1963. }
  1964. return data;
  1965. }
  1966. static common_chat_params common_chat_params_init_glm_4_5(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1967. common_chat_params data;
  1968. data.grammar_lazy = inputs.tools.is_array() && !inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1969. std::string prompt = apply(tmpl, inputs);
  1970. // match the existing trimming behavior
  1971. if (inputs.add_bos && string_starts_with(prompt, tmpl.bos_token())) {
  1972. prompt.erase(0, tmpl.bos_token().size());
  1973. }
  1974. if (inputs.add_eos && string_ends_with(prompt, tmpl.eos_token())) {
  1975. prompt.erase(prompt.size() - tmpl.eos_token().size());
  1976. }
  1977. if (string_ends_with(prompt, "<think>")) {
  1978. if (!inputs.enable_thinking) {
  1979. prompt += "</think>";
  1980. } else {
  1981. data.thinking_forced_open = true;
  1982. }
  1983. }
  1984. // add GLM preserved tokens
  1985. data.preserved_tokens = {
  1986. "<|endoftext|>",
  1987. "[MASK]",
  1988. "[gMASK]",
  1989. "[sMASK]",
  1990. "<sop>",
  1991. "<eop>",
  1992. "<|system|>",
  1993. "<|user|>",
  1994. "<|assistant|>",
  1995. "<|observation|>",
  1996. "<|begin_of_image|>",
  1997. "<|end_of_image|>",
  1998. "<|begin_of_video|>",
  1999. "<|end_of_video|>",
  2000. "<|begin_of_audio|>",
  2001. "<|end_of_audio|>",
  2002. "<|begin_of_transcription|>",
  2003. "<|end_of_transcription|>",
  2004. "<|code_prefix|>",
  2005. "<|code_middle|>",
  2006. "<|code_suffix|>",
  2007. "/nothink",
  2008. "<think>",
  2009. "</think>",
  2010. "<tool_call>",
  2011. "</tool_call>",
  2012. "<arg_key>",
  2013. "</arg_key>",
  2014. "<arg_value>",
  2015. "</arg_value>"
  2016. };
2017. // extra GLM 4.5 stop words
  2018. data.additional_stops.insert(data.additional_stops.end(), {
  2019. "<|user|>",
  2020. "<|observation|>"
  2021. });
  2022. // build grammar for tool call
  2023. static const xml_tool_call_format form {
  2024. /* form.scope_start = */ "",
  2025. /* form.tool_start = */ "\n<tool_call>",
  2026. /* form.tool_sep = */ "\n",
  2027. /* form.key_start = */ "<arg_key>",
  2028. /* form.key_val_sep = */ "</arg_key>\n<arg_value>",
  2029. /* form.val_end = */ "</arg_value>\n",
  2030. /* form.tool_end = */ "</tool_call>\n",
  2031. /* form.scope_end = */ "",
  2032. };
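// Concatenating the fields above, a single call renders roughly as (placeholders in <>):
//   <tool_call><tool>
//   <arg_key><key></arg_key>
//   <arg_value><value></arg_value>
//   </tool_call>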
  2033. build_grammar_xml_tool_call(data, inputs.tools, form);
  2034. data.prompt = prompt;
  2035. data.format = COMMON_CHAT_FORMAT_GLM_4_5;
  2036. return data;
  2037. }
  2038. static common_chat_params common_chat_params_init_firefunction_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2039. LOG_DBG("%s\n", __func__);
  2040. common_chat_params data;
  2041. const std::optional<json> tools_override = json();
  2042. const std::optional<json> additional_context = json {
  2043. {"datetime", format_time(inputs.now, "%b %d %Y %H:%M:%S GMT")},
  2044. {"functions", json(inputs.tools.empty() ? "" : inputs.tools.dump(2))},
  2045. };
  2046. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, tools_override, additional_context);
  2047. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  2048. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2049. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2050. auto schemas = json::array();
  2051. foreach_function(inputs.tools, [&](const json & tool) {
  2052. const auto & function = tool.at("function");
  2053. schemas.push_back({
  2054. {"type", "object"},
  2055. {"properties", {
  2056. {"name", {
  2057. {"type", "string"},
  2058. {"const", function.at("name")},
  2059. }},
  2060. {"arguments", function.at("parameters")},
  2061. }},
  2062. {"required", json::array({"name", "arguments", "id"})},
  2063. });
  2064. });
  2065. auto schema = json {
  2066. {"type", "array"},
  2067. {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
  2068. {"minItems", 1},
  2069. };
  2070. if (!inputs.parallel_tool_calls) {
  2071. schema["maxItems"] = 1;
  2072. }
  2073. builder.add_rule("root", "\" functools\"? " + builder.add_schema("tool_calls", schema));
  2074. });
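// Illustrative tool-call output this grammar constrains (values are placeholders):
//    functools[{"name": "<tool>", "arguments": { ... }, "id": "<id>"}]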
  2075. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, " functools["});
  2076. data.preserved_tokens = {
  2077. " functools[",
  2078. };
  2079. data.format = COMMON_CHAT_FORMAT_FIREFUNCTION_V2;
  2080. } else {
  2081. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  2082. }
  2083. return data;
  2084. }
  2085. static common_chat_params common_chat_params_init_functionary_v3_2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2086. // >>>all\nlet's call functions>>>fn1\n{"arg1": 1...}\n>>>fn2\n{"arg1": 1...}...
  2087. // Using ">>>f1\n", ">>>f2\n"... as trigger words for the grammar
  2088. // If the function is python, we also allow raw python code (if the line after `python\n` doesn't start w/ opening `{`), which the model seems to prefer for multiline code.
  2089. common_chat_params data;
  2090. data.prompt = apply(tmpl, inputs);
  2091. data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2;
  2092. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  2093. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2094. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2095. std::vector<std::string> first_tool_rules;
  2096. std::vector<std::string> subsequent_tool_rules;
  2097. foreach_function(inputs.tools, [&](const json & tool) {
  2098. const auto & function = tool.at("function");
  2099. std::string name = function.at("name");
  2100. auto parameters = function.at("parameters");
  2101. builder.resolve_refs(parameters);
  2102. std::string args_pattern = "[\\s\\S]*";
  2103. auto args_rule = builder.add_schema(name + "-args", parameters);
  2104. if (name == "python") {
  2105. args_rule = builder.add_rule(name + "-maybe-raw-args", args_rule + " | [^{] .*");
  2106. } else {
  2107. args_pattern = "\\{" + args_pattern;
  2108. }
  2109. auto call_rule = builder.add_rule(name + "-call", "\"" + name + "\\n\" " + args_rule);
  2110. first_tool_rules.push_back(call_rule);
  2111. if (inputs.parallel_tool_calls) {
  2112. subsequent_tool_rules.push_back(builder.add_rule(name + "-call2", "\">>>\" " + call_rule));
  2113. }
  2114. data.grammar_triggers.push_back({
  2115. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  2116. "((?:[\\s\\S]+?>>>)?" + regex_escape(name) + "\n)" + args_pattern,
  2117. });
  2118. });
  2119. data.preserved_tokens = {
  2120. "<|end_header_id|>",
  2121. };
  2122. auto first_rule = first_tool_rules.empty() ? "" : builder.add_rule("first_tool_call", string_join(first_tool_rules, " | ")) + " space";
  2123. if (inputs.parallel_tool_calls) {
  2124. auto subsequent_rule = builder.add_rule("subsequent_tool_call", string_join(subsequent_tool_rules, " | ")) + " space";
  2125. builder.add_rule("root", first_rule + " (" + subsequent_rule + ")*");
  2126. } else {
  2127. builder.add_rule("root", first_rule);
  2128. }
  2129. });
  2130. }
  2131. return data;
  2132. }
  2133. static common_chat_params common_chat_params_init_functionary_v3_1_llama_3_1(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2134. // https://github.com/MeetKai/functionary/blob/main/tests/prompt_test_v3-llama3.1.txt
  2135. common_chat_params data;
  2136. if (!inputs.tools.is_null()) {
  2137. std::string python_code_argument_name;
  2138. auto has_raw_python = false;
  2139. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2140. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2141. std::vector<std::string> tool_rules;
  2142. foreach_function(inputs.tools, [&](const json & tool) {
  2143. const auto & function = tool.at("function");
  2144. const auto & parameters = function.at("parameters");
  2145. std::string name = function.at("name");
  2146. if (name == "python" || name == "ipython") {
  2147. if (!parameters.contains("type")) {
  2148. throw std::runtime_error("Missing type in python tool");
  2149. }
  2150. has_raw_python = true;
  2151. const auto & type = parameters.at("type");
  2152. if (type == "object") {
  2153. auto properties = parameters.at("properties");
  2154. for (auto it = properties.begin(); it != properties.end(); ++it) {
  2155. if (it.value().at("type") == "string") {
  2156. if (!python_code_argument_name.empty()) {
  2157. throw std::runtime_error("Multiple string arguments found in python tool");
  2158. }
  2159. python_code_argument_name = it.key();
  2160. }
  2161. }
  2162. if (python_code_argument_name.empty()) {
  2163. throw std::runtime_error("No string argument found in python tool");
  2164. }
  2165. } else if (type != "string") {
  2166. throw std::runtime_error("Invalid type in python tool: " + type.dump());
  2167. }
  2168. }
  2169. tool_rules.push_back(builder.add_rule(name + "-call", "\"<function=" + name + ">\" " + builder.add_schema(name + "-args", parameters) + " \"</function>\" space"));
  2170. });
  2171. if (has_raw_python) {
  2172. tool_rules.push_back(builder.add_rule("python-call", "\"<|python_tag|>\" .*"));
  2173. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
  2174. data.preserved_tokens.push_back("<|python_tag|>");
  2175. }
  2176. auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | ")) + " space";
  2177. builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call);
  2178. data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<function="});
  2179. });
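// Illustrative tool-call shapes accepted above (placeholders in <>):
//   <function=<tool>>{ <arguments> }</function>
//   <|python_tag|><raw python code>   (only when a python/ipython tool is declared)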
  2180. data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1;
  2181. } else {
  2182. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  2183. }
  2184. data.prompt = apply(tmpl, inputs);
  2185. // TODO: if (has_raw_python)
  2186. return data;
  2187. }
  2188. static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2189. common_chat_params data;
  2190. json extra_context = json {
  2191. {"enable_thinking", inputs.enable_thinking},
  2192. };
  2193. extra_context.update(inputs.extra_context);
  2194. data.prompt = apply(tmpl, inputs, /* messages_override =*/ std::nullopt, /* tools_override= */ std::nullopt, extra_context);
  2195. data.format = COMMON_CHAT_FORMAT_HERMES_2_PRO;
  2196. if (string_ends_with(data.prompt, "<think>\n")) {
  2197. if (!extra_context["enable_thinking"]) {
  2198. data.prompt += "</think>";
  2199. } else {
  2200. data.thinking_forced_open = true;
  2201. }
  2202. }
  2203. if (!inputs.tools.is_null()) {
  2204. // (content)?(<tool_call>{"name": "foo", "arguments": {"a": 1}}</tool_call>)*
  2205. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2206. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2207. std::vector<std::string> tool_rules;
  2208. std::vector<std::string> tool_call_alts;
  2209. std::vector<std::string> escaped_names;
  2210. foreach_function(inputs.tools, [&](const json & tool) {
  2211. const auto & function = tool.at("function");
  2212. std::string name = function.at("name");
  2213. auto parameters = function.at("parameters");
  2214. builder.resolve_refs(parameters);
  2215. tool_rules.push_back(builder.add_schema(name + "-call", {
  2216. {"type", "object"},
  2217. {"properties", json {
  2218. {"name", json {{"const", name}}},
  2219. {"arguments", parameters},
  2220. }},
  2221. {"required", json::array({"name", "arguments"})},
  2222. }));
  2223. tool_call_alts.push_back(builder.add_rule(
  2224. name + "-function-tag",
  2225. "\"<function\" ( \"=" + name + "\" | \" name=\\\"" + name + "\\\"\" ) \">\" space " +
  2226. builder.add_schema(name + "-args", parameters) + " "
  2227. "\"</function>\" space"));
  2228. data.grammar_triggers.push_back({
  2229. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  2230. "<function=" + name + ">",
  2231. });
  2232. auto escaped_name = regex_escape(name);
  2233. data.grammar_triggers.push_back({
  2234. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  2235. "<function\\s+name\\s*=\\s*\"" + escaped_name + "\"",
  2236. });
  2237. escaped_names.push_back(escaped_name);
  2238. });
  2239. auto any_tool_call = builder.add_rule("any_tool_call", "( " + string_join(tool_rules, " | ") + " ) space");
  2240. std::vector<std::string> alt_tags {
  2241. any_tool_call,
  2242. "\"<tool_call>\" space " + any_tool_call + " \"</tool_call>\"",
  2243. // The rest is just to accommodate common "good bad" outputs.
  2244. "\"<function_call>\" space " + any_tool_call + " \"</function_call>\"",
  2245. "\"<response>\" space " + any_tool_call + " \"</response>\"",
  2246. "\"<tools>\" space " + any_tool_call + " \"</tools>\"",
  2247. "\"<json>\" space " + any_tool_call + " \"</json>\"",
  2248. "\"<xml>\" space " + any_tool_call + " \"</xml>\"",
  2249. "\"<JSON>\" space " + any_tool_call + " \"</JSON>\"",
  2250. };
  2251. auto wrappable_tool_call = builder.add_rule("wrappable_tool_call", "( " + string_join(alt_tags, " | ") + " ) space");
  2252. tool_call_alts.push_back(wrappable_tool_call);
  2253. tool_call_alts.push_back(
  2254. "( \"```\\n\" | \"```json\\n\" | \"```xml\\n\" ) space " + wrappable_tool_call + " space \"```\" space ");
  2255. auto tool_call = builder.add_rule("tool_call", string_join(tool_call_alts, " | "));
  2256. builder.add_rule("root",
  2257. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  2258. (inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call));
  2259. // Trigger on some common known "good bad" outputs (only from the start and with a json that's about a specific argument name to avoid false positives)
  2260. data.grammar_triggers.push_back({
  2261. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
  2262. // If thinking_forced_open, then we capture the </think> tag in the grammar,
  2263. // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
  2264. std::string(data.thinking_forced_open ? "(</think>\\s*)" : "") + (
  2265. "\\s*("
  2266. "(?:<tool_call>"
  2267. "|<function"
  2268. "|(?:```(?:json|xml)?\n\\s*)?(?:<function_call>|<tools>|<xml><json>|<response>)?"
  2269. "\\s*\\{\\s*\"name\"\\s*:\\s*\"(?:" + string_join(escaped_names, "|") + ")\""
  2270. ")"
  2271. ")"
  2272. ),
  2273. });
  2274. data.preserved_tokens = {
  2275. "<think>",
  2276. "</think>",
  2277. "<tool_call>",
  2278. "</tool_call>",
  2279. "<function",
  2280. "<tools>",
  2281. "</tools>",
  2282. "<response>",
  2283. "</response>",
  2284. "<function_call>",
  2285. "</function_call>",
  2286. "<json>",
  2287. "</json>",
  2288. "<JSON>",
  2289. "</JSON>",
  2290. "```",
  2291. "```json",
  2292. "```xml",
  2293. };
  2294. });
  2295. }
  2296. return data;
  2297. }
  2298. static common_chat_params common_chat_params_init_granite(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2299. common_chat_params data;
  2300. // Pass thinking context for Granite template
  2301. json additional_context = {
  2302. {"thinking", inputs.enable_thinking},
  2303. };
  2304. data.prompt = apply(tmpl, inputs, /* messages_override= */ std::nullopt, /* tools_override= */ std::nullopt, additional_context);
  2305. data.format = COMMON_CHAT_FORMAT_GRANITE;
  2306. if (string_ends_with(data.prompt, "<think>\n") || string_ends_with(data.prompt, "<think>")) {
  2307. if (!inputs.enable_thinking) {
  2308. data.prompt += "</think>";
  2309. } else {
  2310. data.thinking_forced_open = true;
  2311. }
  2312. }
  2313. if (!inputs.tools.is_null()) {
  2314. // Granite uses <|tool_call|> followed by JSON list
  2315. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2316. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2317. std::vector<std::string> tool_rules;
  2318. foreach_function(inputs.tools, [&](const json & tool) {
  2319. const auto & function = tool.at("function");
  2320. std::string name = function.at("name");
  2321. auto parameters = function.at("parameters");
  2322. builder.resolve_refs(parameters);
  2323. tool_rules.push_back(builder.add_rule(name + "-call", builder.add_schema(name +
  2324. "-args", {
  2325. {"type", "object"},
  2326. {"properties", {
  2327. {"name", {{"const", name}}},
  2328. {"arguments", parameters},
  2329. }},
  2330. {"required", json::array({"name", "arguments"})},
  2331. })));
  2332. });
  2333. auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | "));
  2334. auto tool_list = builder.add_rule("tool_list", "\"[\" space " + tool_call + " (\",\" space " + tool_call + ")* space \"]\"");
  2335. if (data.thinking_forced_open) {
  2336. builder.add_rule("root", "\"</think>\" space \"<response>\" space [^<]* \"</response>\" space \"<|tool_call|>\" space " + tool_list);
  2337. } else {
  2338. builder.add_rule("root", "\"<|tool_call|>\" space " + tool_list);
  2339. }
  2340. data.grammar_triggers.push_back({
  2341. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  2342. "<|tool_call|>"
  2343. });
  2344. data.preserved_tokens = {
  2345. "<think>",
  2346. "</think>",
  2347. "<response>",
  2348. "</response>",
  2349. "<|tool_call|>",
  2350. };
  2351. });
  2352. } else {
  2353. // Handle thinking tags for non-tool responses
  2354. if (data.thinking_forced_open && inputs.enable_thinking) {
  2355. data.grammar_lazy = false;
  2356. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2357. builder.add_rule("root", "\"</think>\" space \"<response>\" space .* \"</response>\" space");
  2358. });
  2359. data.preserved_tokens = {
  2360. "<think>",
  2361. "</think>",
  2362. "<response>",
  2363. "</response>",
  2364. };
  2365. }
  2366. }
  2367. return data;
  2368. }
  2369. static common_chat_params common_chat_params_init_solar_open(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2370. common_chat_params data;
  2371. // TODO: Reasoning effort
  2372. json additional_context = {};
  2373. data.prompt = apply(tmpl, inputs, std::nullopt, std::nullopt, additional_context);
  2374. data.format = COMMON_CHAT_FORMAT_SOLAR_OPEN;
  2375. data.preserved_tokens = {
  2376. "<|think|>",
  2377. "<|content|>",
  2378. "<|begin|>",
  2379. "<|end|>",
  2380. };
  2381. // TODO: Tool calling
  2382. return data;
  2383. }
  2384. static common_chat_params common_chat_params_init_exaone_moe(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2385. common_chat_params data;
  2386. data.prompt = apply(tmpl, inputs);
  2387. data.format = COMMON_CHAT_FORMAT_EXAONE_MOE;
  2388. if (string_ends_with(data.prompt, "<think>\n")) {
  2389. if (!inputs.enable_thinking) {
  2390. data.prompt += "</think>\n\n";
  2391. } else {
  2392. data.thinking_forced_open = true;
  2393. }
  2394. }
  2395. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  2396. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
  2397. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2398. std::vector<std::string> tool_rules;
  2399. foreach_function(inputs.tools, [&](const json & tool) {
  2400. const auto & function = tool.at("function");
  2401. std::string name = function.at("name");
  2402. auto parameters = function.at("parameters");
  2403. builder.resolve_refs(parameters);
  2404. // Expect: <tool_call>{"name": "<name>", "arguments": {...}}</tool_call>
  2405. tool_rules.push_back(builder.add_rule(
  2406. name + "-call",
  2407. "\"<tool_call>\" space " +
  2408. builder.add_schema(name + "-obj", json{
  2409. {"type", "object"},
  2410. {"properties", {
  2411. {"name", json{{"const", name}}},
  2412. {"arguments", parameters},
  2413. }},
  2414. {"required", json::array({"name", "arguments"})},
  2415. }) +
  2416. " space \"</tool_call>\" space"));
  2417. });
  2418. auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | "));
  2419. builder.add_rule("root",
  2420. std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
  2421. (inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call));
  2422. data.grammar_triggers.push_back({
  2423. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
  2424. std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)?" : "") +
  2425. "(<tool_call>)[\\s\\S]*"
  2426. });
  2427. data.preserved_tokens = {
  2428. "<think>",
  2429. "</think>",
  2430. "<tool_call>",
  2431. "</tool_call>",
  2432. };
  2433. });
  2434. }
  2435. return data;
  2436. }
  2437. static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
  2438. common_chat_params data;
  2439. data.prompt = apply(tmpl, inputs);
  2440. data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
  2441. data.grammar_lazy = false;
  2442. if (!inputs.json_schema.is_null()) {
  2443. if (!inputs.grammar.empty()) {
  2444. throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
  2445. }
  2446. data.grammar = json_schema_to_grammar(inputs.json_schema);
  2447. } else {
  2448. data.grammar = inputs.grammar;
  2449. }
  2450. return data;
  2451. }
  2452. static common_chat_params common_chat_params_init_seed_oss(
  2453. const common_chat_template & tmpl,
  2454. templates_params & params,
  2455. const common_chat_templates_inputs & inputs)
  2456. {
  2457. common_chat_params data;
  2458. data.prompt = apply(tmpl, params);
  2459. data.format = COMMON_CHAT_FORMAT_SEED_OSS;
  2460. if (string_ends_with(data.prompt, "<seed:think>")) {
  2461. if (!inputs.enable_thinking) {
  2462. data.prompt += "</seed:think>";
  2463. } else {
  2464. data.thinking_forced_open = true;
  2465. }
  2466. }
  2467. if (params.tools.is_array() && !params.tools.empty()) {
  2468. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  2469. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  2470. std::vector<std::string> tool_rules;
  2471. foreach_function(params.tools, [&](const json & tool) {
  2472. const auto & function = tool.at("function");
  2473. std::string name = function.at("name");
  2474. auto parameters = function.at("parameters");
  2475. builder.resolve_refs(parameters);
  2476. // Create rule for Seed-OSS function call format
  2477. std::string param_rules;
  2478. if (parameters.contains("properties")) {
  2479. for (const auto & [key, value] : parameters.at("properties").items()) {
  2480. param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
  2481. "\"</parameter>\"";
  2482. }
  2483. }
  2484. tool_rules.push_back(builder.add_rule(name + "-call",
  2485. "\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
  2486. param_rules +
  2487. " \"</function>\" space \"</seed:tool_call>\""));
  2488. });
  2489. data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
  2490. data.preserved_tokens = {
  2491. "<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
  2492. "<function=", "</function>", "<parameter=", "</parameter>",
  2493. };
  2494. builder.add_rule("root", string_join(tool_rules, " | "));
  2495. });
  2496. }
  2497. return data;
  2498. }
  2499. // various workarounds for known issues with certain templates or model behaviors
  2500. // TODO @ngxson : improve this (how?)
  2501. namespace workaround {
  2502. // if first message is system and template does not support it, merge it with next message
  2503. static void system_message_not_supported(json & messages) {
  2504. if (!messages.empty() && messages.front().at("role") == "system") {
  2505. if (messages.size() > 1) {
  2506. LOG_DBG("Merging system prompt into next message\n");
  2507. auto & first_msg = messages.front();
  2508. auto & second_msg = messages[1];
  2509. second_msg["content"] = first_msg.at("content").get<std::string>()
  2510. + "\n" + second_msg.at("content").get<std::string>();
  2511. messages.erase(messages.begin());
  2512. } else {
  2513. LOG_WRN("Removing system prompt due to template not supporting system role\n");
  2514. messages.erase(messages.begin());
  2515. }
  2516. }
  2517. }
  2518. static void func_args_not_string(json & messages) {
  2519. GGML_ASSERT(messages.is_array());
  2520. for (auto & message : messages) {
  2521. if (message.contains("tool_calls")) {
  2522. for (auto & tool_call : message["tool_calls"]) {
  2523. if (tool_call.contains("function") && tool_call["function"].contains("arguments")) {
  2524. auto & args = tool_call["function"]["arguments"];
  2525. if (args.is_string()) {
  2526. try {
  2527. args = json::parse(args.get<std::string>());
  2528. } catch (const std::exception & e) {
  2529. throw std::runtime_error("Failed to parse tool call arguments as JSON: " + std::string(e.what()));
  2530. }
  2531. }
  2532. }
  2533. }
  2534. }
  2535. }
  2536. }
  2537. static void move_tool_calls_to_content(json & messages, int indent_spaces = 2) {
  2538. GGML_ASSERT(messages.is_array());
  2539. for (auto & message : messages) {
  2540. if (message.contains("tool_calls")) {
  2541. auto tool_calls_new = json{
  2542. {"tool_calls", message.at("tool_calls")}
  2543. };
  2544. message.erase("tool_calls");
  2545. auto content = message.at("content");
  2546. std::string content_new = content.is_null() ? "" : content.get<std::string>();
  2547. message["content"] = content_new + tool_calls_new.dump(indent_spaces, ' ', false, json::error_handler_t::replace);
  2548. }
  2549. }
  2550. }
  2551. // TODO @ngxson : we may remove support for generic schema in the future
  2552. static void use_generic_schema(json & messages) {
  2553. GGML_ASSERT(messages.is_array());
  2554. for (auto & message : messages) {
  2555. if (message.contains("tool_calls") && message.at("tool_calls").is_array()) {
  2556. auto & tool_calls = message.at("tool_calls");
  2557. for (auto & tool_call : tool_calls) {
  2558. if (tool_call.contains("type") && tool_call.at("type") == "function" &&
  2559. tool_call.contains("function") && tool_call.at("function").is_object()) {
  2560. // Copy values before erasing to avoid use-after-free
  2561. json name_value;
  2562. json arguments_value;
  2563. json id_value;
  2564. const auto & function = tool_call.at("function");
  2565. if (function.contains("name")) {
  2566. name_value = function.at("name");
  2567. }
  2568. if (function.contains("arguments")) {
  2569. arguments_value = function.at("arguments");
  2570. }
  2571. if (tool_call.contains("id")) {
  2572. id_value = tool_call.at("id");
  2573. }
  2574. // Now safely erase and assign in the correct order
  2575. tool_call.erase("type");
  2576. tool_call.erase("function");
  2577. tool_call.erase("id");
  2578. // Reassign in desired order: name, arguments, id
  2579. if (!name_value.is_null()) {
  2580. tool_call["name"] = name_value;
  2581. }
  2582. if (!arguments_value.is_null()) {
  2583. tool_call["arguments"] = arguments_value;
  2584. }
  2585. if (!id_value.is_null()) {
  2586. tool_call["id"] = id_value;
  2587. }
  2588. }
  2589. }
  2590. }
  2591. }
  2592. }
  2593. } // namespace workaround
  2594. static common_chat_params common_chat_templates_apply_jinja(
  2595. const struct common_chat_templates * tmpls,
  2596. const struct common_chat_templates_inputs & inputs)
  2597. {
  2598. templates_params params;
  2599. params.tools = common_chat_tools_to_json_oaicompat<json>(inputs.tools);
  2600. const auto & tmpl = params.tools.is_array() && tmpls->template_tool_use
  2601. ? *tmpls->template_tool_use
  2602. : *tmpls->template_default;
  2603. const auto & src = tmpl.source();
  2604. const auto & caps = tmpl.original_caps();
  2605. params.messages = common_chat_msgs_to_json_oaicompat<json>(inputs.messages, /* concat_text= */ !tmpl.original_caps().requires_typed_content);
  2606. params.add_generation_prompt = inputs.add_generation_prompt;
  2607. params.tool_choice = inputs.tool_choice;
  2608. params.reasoning_format = inputs.reasoning_format;
  2609. params.enable_thinking = inputs.enable_thinking;
  2610. params.grammar = inputs.grammar;
  2611. params.now = inputs.now;
  2612. params.add_bos = tmpls->add_bos;
  2613. params.add_eos = tmpls->add_eos;
  2614. if (!tmpl.original_caps().supports_system_role) {
  2615. workaround::system_message_not_supported(params.messages);
  2616. }
  2617. params.extra_context = json::object();
  2618. for (auto el : inputs.chat_template_kwargs) {
  2619. params.extra_context[el.first] = json::parse(el.second);
  2620. }
  2621. if (!inputs.json_schema.empty()) {
  2622. params.json_schema = json::parse(inputs.json_schema);
  2623. }
  2624. if (inputs.parallel_tool_calls && !tmpl.original_caps().supports_parallel_tool_calls) {
  2625. LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n");
  2626. params.parallel_tool_calls = false;
  2627. } else {
  2628. params.parallel_tool_calls = inputs.parallel_tool_calls;
  2629. }
  2630. if (params.tools.is_array()) {
  2631. if (params.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && !params.grammar.empty()) {
  2632. throw std::runtime_error("Cannot specify grammar with tools");
  2633. }
  2634. if (caps.supports_tool_calls && !caps.supports_tools) {
  2635. LOG_WRN("Template supports tool calls but does not natively describe tools. The fallback behaviour used may produce bad results, inspect prompt w/ --verbose & consider overriding the template.\n");
  2636. }
  2637. }
  2638. // DeepSeek V3.1: detect based on specific patterns in the template
  2639. if (src.find("message['prefix'] is defined and message['prefix'] and thinking") != std::string::npos &&
  2640. params.json_schema.is_null()) {
  2641. return common_chat_params_init_deepseek_v3_1(tmpl, params);
  2642. }
  2643. // DeepSeek R1: use handler in all cases except json schema (thinking / tools).
  2644. if (src.find("<|tool▁calls▁begin|>") != std::string::npos && params.json_schema.is_null()) {
  2645. return common_chat_params_init_deepseek_r1(tmpl, params);
  2646. }
  2647. // Command R7B: : use handler in all cases except json schema (thinking / tools).
  2648. if (src.find("<|END_THINKING|><|START_ACTION|>") != std::string::npos && params.json_schema.is_null()) {
  2649. workaround::func_args_not_string(params.messages);
  2650. return common_chat_params_init_command_r7b(tmpl, params);
  2651. }
  2652. // Granite (IBM) - detects thinking / tools support
  2653. if (src.find("elif thinking") != std::string::npos && src.find("<|tool_call|>") != std::string::npos) {
  2654. workaround::func_args_not_string(params.messages);
  2655. workaround::use_generic_schema(params.messages);
  2656. workaround::move_tool_calls_to_content(params.messages);
  2657. return common_chat_params_init_granite(tmpl, params);
  2658. }
  2659. // GLM 4.5: detect by <arg_key> and <arg_value> tags (check before Hermes since both use <tool_call>)
  2660. if (src.find("[gMASK]<sop>") != std::string::npos &&
  2661. src.find("<arg_key>") != std::string::npos &&
  2662. src.find("<arg_value>") != std::string::npos &&
  2663. params.json_schema.is_null()) {
  2664. workaround::func_args_not_string(params.messages);
  2665. return common_chat_params_init_glm_4_5(tmpl, params);
  2666. }
  2667. // Qwen3-Coder XML format detection (must come before Hermes 2 Pro)
  2668. // Detect via explicit XML markers unique to Qwen3-Coder to avoid false positives in other templates.
  2669. // Require presence of <tool_call>, <function=...>, and <parameter=...> blocks.
  2670. if (src.find("<tool_call>") != std::string::npos &&
  2671. src.find("<function>") != std::string::npos &&
  2672. src.find("<function=") != std::string::npos &&
  2673. src.find("<parameters>") != std::string::npos &&
  2674. src.find("<parameter=") != std::string::npos) {
  2675. workaround::func_args_not_string(params.messages);
  2676. // Nemotron 3 Nano 30B A3B
  2677. if (src.find("<think>") != std::string::npos) {
  2678. return common_chat_params_init_nemotron_v3(tmpl, params);
  2679. }
  2680. return common_chat_params_init_qwen3_coder_xml(tmpl, params);
  2681. }
  2682. // Xiaomi MiMo format detection (must come before Hermes 2 Pro)
  2683. if (src.find("<tools>") != std::string::npos &&
  2684. src.find("# Tools") != std::string::npos &&
  2685. src.find("</tools>") != std::string::npos &&
  2686. src.find("<tool_calls>") != std::string::npos &&
  2687. src.find("</tool_calls>") != std::string::npos &&
  2688. src.find("<tool_response>") != std::string::npos) {
  2689. return common_chat_params_init_xiaomi_mimo(tmpl, params);
  2690. }
  2691. // EXAONE MoE format detection
  2692. if (src.find("<tool_call>") != std::string::npos &&
  2693. src.find("<tool_result>") != std::string::npos &&
  2694. src.find("<|tool_declare|>") != std::string::npos) {
  2695. return common_chat_params_init_exaone_moe(tmpl, params);
  2696. }
  2697. // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
  2698. if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) {
  2699. return common_chat_params_init_hermes_2_pro(tmpl, params);
  2700. }
  2701. // GPT-OSS
  2702. if (src.find("<|channel|>") != std::string::npos) {
  2703. return common_chat_params_init_gpt_oss(tmpl, params);
  2704. }
  2705. // Seed-OSS
  2706. if (src.find("<seed:think>") != std::string::npos) {
  2707. workaround::func_args_not_string(params.messages);
  2708. return common_chat_params_init_seed_oss(tmpl, params, inputs);
  2709. }
  2710. // Nemotron v2
  2711. if (src.find("<SPECIAL_10>") != std::string::npos) {
  2712. return common_chat_params_init_nemotron_v2(tmpl, params);
  2713. }
  2714. // Apertus format detection
  2715. if (src.find("<|system_start|>") != std::string::npos && src.find("<|tools_prefix|>") != std::string::npos) {
  2716. return common_chat_params_init_apertus(tmpl, params);
  2717. }
  2718. // LFM2 (w/ tools)
  2719. if (src.find("List of tools: <|tool_list_start|>[") != std::string::npos &&
  2720. src.find("]<|tool_list_end|>") != std::string::npos) {
  2721. return common_chat_params_init_lfm2(tmpl, params);
  2722. }
  2723. // MiniMax-M2 format detection
  2724. if (src.find("]~!b[") != std::string::npos && src.find("]~b]") != std::string::npos) {
  2725. workaround::func_args_not_string(params.messages);
  2726. return common_chat_params_init_minimax_m2(tmpl, params);
  2727. }
  2728. // Kimi K2 format detection
  2729. if (src.find("<|im_system|>tool_declare<|im_middle|>") != std::string::npos &&
  2730. src.find("<|tool_calls_section_begin|>") != std::string::npos &&
  2731. src.find("## Return of") != std::string::npos) {
  2732. return common_chat_params_init_kimi_k2(tmpl, params);
  2733. }
  2734. // Apriel 1.5 format detection
  2735. if (src.find("<thinking>") != std::string::npos &&
  2736. src.find("</thinking>") != std::string::npos &&
  2737. src.find("<available_tools>") != std::string::npos &&
  2738. src.find("<|assistant|>") != std::string::npos &&
  2739. src.find("<|tool_result|>") != std::string::npos &&
  2740. src.find("<tool_calls>[") != std::string::npos &&
  2741. src.find("]</tool_calls>") != std::string::npos) {
  2742. return common_chat_params_init_apriel_1_5(tmpl, params);
  2743. }
  2744. // Use generic handler when mixing tools + JSON schema.
  2745. // TODO: support that mix in handlers below.
  2746. if ((params.tools.is_array() && params.json_schema.is_object())) {
  2747. return common_chat_params_init_generic(tmpl, params);
  2748. }
  2749. // Functionary prepends "all\n" to plain content outputs, so we use its handler in all cases.
  2750. if (src.find(">>>all") != std::string::npos) {
  2751. return common_chat_params_init_functionary_v3_2(tmpl, params);
  2752. }
  2753. // Firefunction v2 requires datetime and functions in the context even w/o tools, so we also use its handler in all cases.
  2754. if (src.find(" functools[") != std::string::npos) {
  2755. return common_chat_params_init_firefunction_v2(tmpl, params);
  2756. }
  2757. // Functionary v3.1 (w/ tools)
  2758. if (src.find("<|start_header_id|>") != std::string::npos
  2759. && src.find("<function=") != std::string::npos) {
  2760. return common_chat_params_init_functionary_v3_1_llama_3_1(tmpl, params);
  2761. }
  2762. // Llama 3.1, 3.2, 3.3 (also requires date_string so using it even w/o tools)
  2763. if (src.find("<|start_header_id|>ipython<|end_header_id|>") != std::string::npos) {
  2764. auto allow_python_tag_builtin_tools = src.find("<|python_tag|>") != std::string::npos;
  2765. workaround::func_args_not_string(params.messages);
  2766. return common_chat_params_init_llama_3_x(tmpl, params, allow_python_tag_builtin_tools);
  2767. }
  2768. // Ministral/Mistral Large 3
  2769. if (src.find("[SYSTEM_PROMPT]") != std::string::npos &&
  2770. src.find("[TOOL_CALLS]") != std::string::npos &&
  2771. src.find("[ARGS]") != std::string::npos) {
  2772. return common_chat_params_init_ministral_3(tmpl, params);
  2773. }
  2774. if (src.find("[THINK]") != std::string::npos && src.find("[/THINK]") != std::string::npos) {
  2775. return common_chat_params_init_magistral(tmpl, params);
  2776. }
  2777. // Solar Open
  2778. if (src.find("<|tool_response:begin|>") != std::string::npos &&
  2779. src.find("<|tool_response:name|>") != std::string::npos &&
  2780. src.find("<|tool_response:result|>") != std::string::npos) {
  2781. return common_chat_params_init_solar_open(tmpl, params);
  2782. }
  2783. // Plain handler (no tools)
  2784. if (params.tools.is_null() || inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_NONE) {
  2785. return common_chat_params_init_without_tools(tmpl, params);
  2786. }
  2787. // Mistral Nemo (w/ tools)
  2788. if (src.find("[TOOL_CALLS]") != std::string::npos) {
  2789. workaround::func_args_not_string(params.messages);
  2790. return common_chat_params_init_mistral_nemo(tmpl, params);
  2791. }
  2792. // Generic fallback
  2793. workaround::func_args_not_string(params.messages);
  2794. workaround::use_generic_schema(params.messages);
  2795. workaround::move_tool_calls_to_content(params.messages);
  2796. return common_chat_params_init_generic(tmpl, params);
  2797. }
  2798. // Legacy template route (adhoc C++ implementation of known templates), forward to llama_chat_apply_template.
  2799. static common_chat_params common_chat_templates_apply_legacy(
  2800. const struct common_chat_templates * tmpls,
  2801. const struct common_chat_templates_inputs & inputs)
  2802. {
  2803. size_t alloc_size = 0;
  2804. std::vector<llama_chat_message> chat;
  2805. std::vector<std::string> contents;
  2806. for (const auto & msg : inputs.messages) {
  2807. auto content = msg.content;
  2808. for (const auto & part : msg.content_parts) {
  2809. if (part.type != "text") {
  2810. LOG_WRN("Ignoring non-text content part: %s\n", part.type.c_str());
  2811. continue;
  2812. }
  2813. if (!content.empty()) {
  2814. content += "\n";;
  2815. }
  2816. content += part.text;
  2817. }
  2818. contents.emplace_back(std::move(content));
  2819. }
  2820. for (size_t i = 0; i < contents.size(); ++i) {
  2821. const auto & msg = inputs.messages[i];
  2822. const auto & content = contents[i];
  2823. chat.push_back({msg.role.c_str(), content.c_str()});
  2824. size_t msg_size = msg.role.size() + content.size();
  2825. alloc_size += msg_size + (msg_size / 4); // == msg_size * 1.25 but avoiding float ops
  2826. }
  2827. std::vector<char> buf(alloc_size);
  2828. // run the first time to get the total output length
  2829. const auto & src = tmpls->template_default->source();
  2830. int32_t res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());
  2831. // error: chat template is not supported
  2832. if (res < 0) {
  2833. // if the custom "tmpl" is not supported, we throw an error
  2834. // this is a bit redundant (for good), since we're not sure if user validated the custom template with llama_chat_verify_template()
  2835. throw std::runtime_error("this custom template is not supported, try using --jinja");
  2836. }
  2837. // if it turns out that our buffer is too small, we resize it
  2838. if ((size_t) res > buf.size()) {
  2839. buf.resize(res);
  2840. res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());
  2841. }
  2842. // for safety, we check the result again
  2843. if (res < 0 || (size_t) res > buf.size()) {
  2844. throw std::runtime_error("failed to apply chat template, try using --jinja");
  2845. }
  2846. common_chat_params params;
  2847. params.prompt = std::string(buf.data(), res);
  2848. if (!inputs.json_schema.empty()) {
  2849. params.grammar = json_schema_to_grammar(json::parse(inputs.json_schema));
  2850. } else {
  2851. params.grammar = inputs.grammar;
  2852. }
  2853. return params;
  2854. }
  2855. common_chat_params common_chat_templates_apply(
  2856. const struct common_chat_templates * tmpls,
  2857. const struct common_chat_templates_inputs & inputs)
  2858. {
  2859. GGML_ASSERT(tmpls != nullptr);
  2860. return inputs.use_jinja
  2861. ? common_chat_templates_apply_jinja(tmpls, inputs)
  2862. : common_chat_templates_apply_legacy(tmpls, inputs);
  2863. }