#include "chat.h"
#include "json-schema-to-grammar.h"
#include "log.h"
#include "minja/chat-template.hpp"
#include "minja/minja.hpp"

#include <optional>

typedef minja::chat_template common_chat_template;

struct common_chat_templates {
    bool has_explicit_template; // Model had builtin template or template override was specified.
    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
    std::unique_ptr<common_chat_template> template_tool_use;
};

struct templates_params {
    json messages;
    json tools;
    common_chat_tool_choice tool_choice;
    json json_schema;
    bool parallel_tool_calls;
    bool stream;
    std::string grammar;
    bool add_generation_prompt = true;
    bool extract_reasoning = true;
};

common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice) {
    if (tool_choice == "auto") {
        return COMMON_CHAT_TOOL_CHOICE_AUTO;
    }
    if (tool_choice == "none") {
        return COMMON_CHAT_TOOL_CHOICE_NONE;
    }
    if (tool_choice == "required") {
        return COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    }
    throw std::runtime_error("Invalid tool_choice: " + tool_choice);
}
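
// Illustrative only (editor's sketch, not upstream code) — a hypothetical call site:
//   auto choice = common_chat_tool_choice_parse_oaicompat("required"); // -> COMMON_CHAT_TOOL_CHOICE_REQUIRED
//   common_chat_tool_choice_parse_oaicompat("banana");                 // throws std::runtime_error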

template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const json & messages) {
    std::vector<common_chat_msg> msgs;

    try {
        if (!messages.is_array()) {
            throw std::runtime_error("Expected 'messages' to be an array, got " + messages.dump());
        }

        for (const auto & message : messages) {
            if (!message.is_object()) {
                throw std::runtime_error("Expected 'message' to be an object, got " + message.dump());
            }

            common_chat_msg msg;
            if (!message.contains("role")) {
                throw std::runtime_error("Missing 'role' in message: " + message.dump());
            }
            msg.role = message.at("role");

            auto has_content    = message.contains("content");
            auto has_tool_calls = message.contains("tool_calls");
            if (has_content) {
                const auto & content = message.at("content");
                if (content.is_string()) {
                    msg.content = content;
                } else if (content.is_array()) {
                    for (const auto & part : content) {
                        if (!part.contains("type")) {
                            throw std::runtime_error("Missing content part type: " + part.dump());
                        }
                        const auto & type = part.at("type");
                        if (type != "text") {
                            throw std::runtime_error("Unsupported content part type: " + type.dump());
                        }
                        common_chat_msg_content_part msg_part;
                        msg_part.type = type;
                        msg_part.text = part.at("text");
                        msg.content_parts.push_back(msg_part);
                    }
                } else if (!content.is_null()) {
                    throw std::runtime_error("Invalid 'content' type: expected string or array, got " + content.dump() + " (ref: https://github.com/ggml-org/llama.cpp/issues/8367)");
                }
            }
            if (has_tool_calls) {
                for (const auto & tool_call : message.at("tool_calls")) {
                    common_chat_tool_call tc;
                    if (!tool_call.contains("type")) {
                        throw std::runtime_error("Missing tool call type: " + tool_call.dump());
                    }
                    const auto & type = tool_call.at("type");
                    if (type != "function") {
                        throw std::runtime_error("Unsupported tool call type: " + tool_call.dump());
                    }
                    if (!tool_call.contains("function")) {
                        throw std::runtime_error("Missing tool call function: " + tool_call.dump());
                    }
                    const auto & fc = tool_call.at("function");
                    if (!fc.contains("name")) {
                        throw std::runtime_error("Missing tool call name: " + tool_call.dump());
                    }
                    tc.name = fc.at("name");
                    tc.arguments = fc.at("arguments");
                    if (tool_call.contains("id")) {
                        tc.id = tool_call.at("id");
                    }
                    msg.tool_calls.push_back(tc);
                }
            }
            if (!has_content && !has_tool_calls) {
                throw std::runtime_error("Expected 'content' or 'tool_calls' (ref: https://github.com/ggml-org/llama.cpp/issues/8367 & https://github.com/ggml-org/llama.cpp/issues/12279)");
            }
            if (message.contains("reasoning_content")) {
                msg.reasoning_content = message.at("reasoning_content");
            }
            if (message.contains("name")) {
                msg.tool_name = message.at("name");
            }
            if (message.contains("tool_call_id")) {
                msg.tool_call_id = message.at("tool_call_id");
            }

            msgs.push_back(msg);
        }
    } catch (const std::exception & e) {
        throw std::runtime_error("Failed to parse messages: " + std::string(e.what()) + "; messages = " + messages.dump(2));
    }

    return msgs;
}
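
// Illustrative OpenAI-compatible payload this parser accepts (hypothetical values):
//   [
//     {"role": "user", "content": "What's the weather?"},
//     {"role": "assistant", "tool_calls": [{"type": "function", "id": "call0001",
//       "function": {"name": "get_weather", "arguments": "{\"city\": \"Paris\"}"}}]},
//     {"role": "tool", "name": "get_weather", "tool_call_id": "call0001", "content": "Sunny"}
//   ]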

template <>
json common_chat_msgs_to_json_oaicompat(const std::vector<common_chat_msg> & msgs, bool concat_typed_text) {
    json messages = json::array();
    for (const auto & msg : msgs) {
        if (!msg.content.empty() && !msg.content_parts.empty()) {
            throw std::runtime_error("Cannot specify both content and content_parts");
        }
        json jmsg {
            {"role", msg.role},
        };
        if (!msg.content.empty()) {
            jmsg["content"] = msg.content;
        } else if (!msg.content_parts.empty()) {
            if (concat_typed_text) {
                std::string text;
                for (const auto & part : msg.content_parts) {
                    if (part.type != "text") {
                        LOG_WRN("Ignoring content part type: %s\n", part.type.c_str());
                        continue;
                    }
                    if (!text.empty()) {
                        text += '\n';
                    }
                    text += part.text;
                }
                jmsg["content"] = text;
            } else {
                auto & parts = jmsg["content"] = json::array();
                for (const auto & part : msg.content_parts) {
                    parts.push_back({
                        {"type", part.type},
                        {"text", part.text},
                    });
                }
            }
        } else {
            jmsg["content"] = json(); // null
        }
        if (!msg.reasoning_content.empty()) {
            jmsg["reasoning_content"] = msg.reasoning_content;
        }
        if (!msg.tool_name.empty()) {
            jmsg["name"] = msg.tool_name;
        }
        if (!msg.tool_call_id.empty()) {
            jmsg["tool_call_id"] = msg.tool_call_id;
        }
        if (!msg.tool_calls.empty()) {
            auto & tool_calls = jmsg["tool_calls"] = json::array();
            for (const auto & tool_call : msg.tool_calls) {
                json tc {
                    {"type", "function"},
                    {"function", {
                        {"name", tool_call.name},
                        {"arguments", tool_call.arguments},
                    }},
                };
                if (!tool_call.id.empty()) {
                    tc["id"] = tool_call.id;
                }
                tool_calls.push_back(tc);
            }
        }
        messages.push_back(jmsg);
    }
    return messages;
}
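
// The two functions above are intended to round-trip; a hypothetical sketch (the explicit
// <json> template argument is assumed here, since the return type is not deducible):
//   auto msgs = common_chat_msgs_parse_oaicompat(json::parse(R"([{"role":"user","content":"hi"}])"));
//   auto back = common_chat_msgs_to_json_oaicompat<json>(msgs, /* concat_typed_text= */ false);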

template <>
std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const std::string & messages) {
    return common_chat_msgs_parse_oaicompat(json::parse(messages));
}

template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const json & tools) {
    std::vector<common_chat_tool> result;

    try {
        if (!tools.is_null()) {
            if (!tools.is_array()) {
                throw std::runtime_error("Expected 'tools' to be an array, got " + tools.dump());
            }
            for (const auto & tool : tools) {
                if (!tool.contains("type")) {
                    throw std::runtime_error("Missing tool type: " + tool.dump());
                }
                const auto & type = tool.at("type");
                if (!type.is_string() || type != "function") {
                    throw std::runtime_error("Unsupported tool type: " + tool.dump());
                }
                if (!tool.contains("function")) {
                    throw std::runtime_error("Missing tool function: " + tool.dump());
                }
                const auto & function = tool.at("function");
                result.push_back({
                    /* .name = */ function.at("name"),
                    /* .description = */ function.at("description"),
                    /* .parameters = */ function.at("parameters").dump(),
                });
            }
        }
    } catch (const std::exception & e) {
        throw std::runtime_error("Failed to parse tools: " + std::string(e.what()) + "; tools = " + tools.dump(2));
    }

    return result;
}

template <>
std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const std::string & tools) {
    return common_chat_tools_parse_oaicompat(json::parse(tools));
}

template <>
json common_chat_tools_to_json_oaicompat(const std::vector<common_chat_tool> & tools) {
    if (tools.empty()) {
        return json();
    }

    auto result = json::array();
    for (const auto & tool : tools) {
        result.push_back({
            {"type", "function"},
            {"function", {
                {"name", tool.name},
                {"description", tool.description},
                {"parameters", json::parse(tool.parameters)},
            }},
        });
    }
    return result;
}
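
// Illustrative output shape for a single tool (abridged, hypothetical values):
//   [{"type": "function", "function": {"name": "get_weather",
//     "description": "...", "parameters": {"type": "object", ...}}}]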

bool common_chat_verify_template(const std::string & tmpl, bool use_jinja) {
    if (use_jinja) {
        try {
            common_chat_msg msg;
            msg.role = "user";
            msg.content = "test";
            auto tmpls = common_chat_templates_init(/* model= */ nullptr, tmpl);
            common_chat_templates_inputs inputs;
            inputs.messages = {msg};
            common_chat_templates_apply(tmpls.get(), inputs);
            return true;
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to apply template: %s\n", __func__, e.what());
            return false;
        }
    }
    llama_chat_message chat[] = {{"user", "test"}};
    const int res = llama_chat_apply_template(tmpl.c_str(), chat, 1, true, nullptr, 0);
    return res >= 0;
}
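
// Hypothetical call site (editor's sketch): this returns false instead of throwing, so
// callers can probe an untrusted template string before committing to it:
//   if (!common_chat_verify_template(user_tmpl, /* use_jinja= */ true)) { /* fall back to chatml */ }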

std::string common_chat_format_single(
        const struct common_chat_templates * tmpls,
        const std::vector<common_chat_msg> & past_msg,
        const common_chat_msg & new_msg,
        bool add_ass,
        bool use_jinja) {
    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;

    std::string fmt_past_msg;
    if (!past_msg.empty()) {
        inputs.messages = past_msg;
        inputs.add_generation_prompt = false;
        fmt_past_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    }
    std::ostringstream ss;
    // if the past_msg ends with a newline, we must preserve it in the formatted version
    if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
        ss << "\n";
    }
    // format chat with new_msg
    inputs.messages.push_back(new_msg);
    inputs.add_generation_prompt = add_ass;
    auto fmt_new_msg = common_chat_templates_apply(tmpls, inputs).prompt;
    // get the diff part
    ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return ss.str();
}

std::string common_chat_format_example(const struct common_chat_templates * tmpls, bool use_jinja) {
    common_chat_templates_inputs inputs;
    inputs.use_jinja = use_jinja;
    auto add_simple_msg = [&](auto role, auto content) {
        common_chat_msg msg;
        msg.role = role;
        msg.content = content;
        inputs.messages.push_back(msg);
    };
    add_simple_msg("system",    "You are a helpful assistant");
    add_simple_msg("user",      "Hello");
    add_simple_msg("assistant", "Hi there");
    add_simple_msg("user",      "How are you?");
    return common_chat_templates_apply(tmpls, inputs).prompt;
}

#define CHATML_TEMPLATE_SRC \
    "{%- for message in messages -%}\n" \
    "  {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' -}}\n" \
    "{%- endfor -%}\n" \
    "{%- if add_generation_prompt -%}\n" \
    "  {{- '<|im_start|>assistant\n' -}}\n" \
    "{%- endif -%}"

void common_chat_templates_free(struct common_chat_templates * tmpls) {
    delete tmpls;
}

bool common_chat_templates_was_explicit(const struct common_chat_templates * tmpls) {
    return tmpls->has_explicit_template;
}

const char * common_chat_templates_source(const struct common_chat_templates * tmpls, const char * variant) {
    if (variant != nullptr) {
        if (strcmp(variant, "tool_use") == 0) {
            if (tmpls->template_tool_use) {
                return tmpls->template_tool_use->source().c_str();
            }
            return nullptr;
        } else {
            LOG_DBG("%s: unknown template variant: %s\n", __func__, variant);
        }
    }
    return tmpls->template_default->source().c_str();
}

common_chat_templates_ptr common_chat_templates_init(
    const struct llama_model * model,
    const std::string & chat_template_override,
    const std::string & bos_token_override,
    const std::string & eos_token_override)
{
    std::string default_template_src;
    std::string template_tool_use_src;

    bool has_explicit_template = !chat_template_override.empty();
    if (chat_template_override.empty()) {
        GGML_ASSERT(model != nullptr);
        const auto * str = llama_model_chat_template(model, /* name */ nullptr);
        if (str) {
            default_template_src = str;
            has_explicit_template = true;
        }
        str = llama_model_chat_template(model, /* name */ "tool_use");
        if (str) {
            template_tool_use_src = str;
            has_explicit_template = true;
        }
    } else {
        default_template_src = chat_template_override;
    }
    if (default_template_src.empty() || default_template_src == "chatml") {
        if (!template_tool_use_src.empty()) {
            default_template_src = template_tool_use_src;
        } else {
            default_template_src = CHATML_TEMPLATE_SRC;
        }
    }
    std::string token_bos = bos_token_override;
    std::string token_eos = eos_token_override;
    if (model) {
        const auto * vocab = llama_model_get_vocab(model);
        const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
            if (token == LLAMA_TOKEN_NULL) {
                if (default_template_src.find(jinja_variable_name) != std::string::npos
                    || template_tool_use_src.find(jinja_variable_name) != std::string::npos) {
                    LOG_WRN("common_chat_templates_init: warning: vocab does not have a %s token, jinja template won't work as intended.\n", name);
                }
                return std::string();
            }
            return common_token_to_piece(vocab, token, true);
        };
        token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
        token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
    }
    common_chat_templates_ptr tmpls(new common_chat_templates());
    tmpls->has_explicit_template = has_explicit_template;
    try {
        tmpls->template_default = std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos);
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to parse chat template (defaulting to chatml): %s \n", __func__, e.what());
        tmpls->template_default = std::make_unique<minja::chat_template>(CHATML_TEMPLATE_SRC, token_bos, token_eos);
    }
    if (!template_tool_use_src.empty()) {
        try {
            tmpls->template_tool_use = std::make_unique<minja::chat_template>(template_tool_use_src, token_bos, token_eos);
        } catch (const std::exception & e) {
            LOG_ERR("%s: failed to parse tool use chat template (ignoring it): %s\n", __func__, e.what());
        }
    }
    return tmpls;
}

std::string common_chat_format_name(common_chat_format format) {
    switch (format) {
        case COMMON_CHAT_FORMAT_CONTENT_ONLY: return "Content-only";
        case COMMON_CHAT_FORMAT_GENERIC: return "Generic";
        case COMMON_CHAT_FORMAT_MISTRAL_NEMO: return "Mistral Nemo";
        case COMMON_CHAT_FORMAT_LLAMA_3_X: return "Llama 3.x";
        case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return "Llama 3.x with builtin tools";
        case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "DeepSeek R1";
        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING: return "DeepSeek R1 (extract reasoning)";
        case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return "FireFunction v2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2";
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1";
        case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro";
        case COMMON_CHAT_FORMAT_HERMES_2_PRO_EXTRACT_REASONING: return "Hermes 2 Pro (extract reasoning)";
        case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
        case COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING: return "Command R7B (extract reasoning)";
        default:
            throw std::runtime_error("Unknown chat format");
    }
}

static bool parse_json(std::string::const_iterator & it, const std::string::const_iterator & end, json & out) {
    // https://json.nlohmann.me/features/parsing/sax_interface/
    struct json_error_locator : public nlohmann::json_sax<json> {
        std::size_t position;
        bool found_error;

        json_error_locator() : position(0), found_error(false) {}

        bool parse_error(std::size_t position, const std::string &, const json::exception &) override { // NOLINT
            this->position = position - 1;
            this->found_error = true;
            return false;
        }
        bool null() override { return true; } // NOLINT
        bool boolean(bool) override { return true; } // NOLINT
        bool number_integer(number_integer_t) override { return true; } // NOLINT
        bool number_unsigned(number_unsigned_t) override { return true; } // NOLINT
        bool number_float(number_float_t, const string_t &) override { return true; } // NOLINT
        bool string(string_t &) override { return true; } // NOLINT
        bool binary(binary_t &) override { return true; } // NOLINT
        bool start_object(std::size_t) override { return true; } // NOLINT
        bool key(string_t &) override { return true; } // NOLINT
        bool end_object() override { return true; }
        bool start_array(std::size_t) override { return true; } // NOLINT
        bool end_array() override { return true; }
    };
    json_error_locator err_loc;
    json::sax_parse(it, end, &err_loc);

    std::string::const_iterator tentative_end;
    if (err_loc.found_error) {
        tentative_end = it + err_loc.position;
    } else {
        tentative_end = end;
    }
    std::string json_sub {it, tentative_end};
    try {
        out = json::parse(json_sub);
        it = tentative_end;
        return true;
    } catch (const std::exception &) {
        return false;
    }
}
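
// Behavior sketch (editor's illustration, hypothetical input): with `it` pointing at
//   {"a": 1} trailing text
// the SAX error locator records where valid JSON ends, `out` receives {"a": 1}, and
// `it` is advanced past the object so the caller can keep matching " trailing text".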

static bool parse_literal(std::string::const_iterator & it, const std::string::const_iterator & end, const std::string & expected) {
    auto expected_it = expected.begin();
    auto tmp_it = it;
    while (tmp_it != end && expected_it != expected.end() && *tmp_it == *expected_it) {
        ++tmp_it;
        ++expected_it;
    }
    if (expected_it == expected.end()) {
        it = tmp_it;
        return true;
    }
    return false;
}

static std::optional<std::smatch> parse_pattern(std::string::const_iterator & it, const std::string::const_iterator & end, const std::regex & expected) {
    std::smatch match;
    if (std::regex_match(it, end, match, expected)) {
        it = match.suffix().first;
        return match;
    }
    return std::nullopt;
}

static void consume_spaces(std::string::const_iterator & it, const std::string::const_iterator & end) {
    while (it != end && std::isspace(*it)) {
        ++it;
    }
}

/**
 * Takes a prefix regex that must have 1 group to capture the function name, a closing suffix, and expects json parameters in between.
 * Aggregates the prefix, suffix and in-between text into the content.
 */
static common_chat_msg parse_json_tool_calls(
    const std::string & input,
    const std::optional<std::regex> & trigger_opt,
    const std::regex & function_regex,
    const std::regex & close_regex,
    bool allow_raw_python = false) {
    std::smatch match;

    common_chat_msg result;
    result.role = "assistant";

    auto end = input.end();
    auto it = input.begin();

    if (trigger_opt) {
        if (!std::regex_search(it, end, match, *trigger_opt)) {
            result.content = input;
            return result;
        }
        result.content = match.prefix().str();
        it = match.suffix().first;
    }

    while (it != end) {
        std::sregex_iterator rend;
        std::sregex_iterator rit(it, end, function_regex);
        if (rit == rend) {
            result.content += std::string(it, end);
            break;
        }
        auto name = rit->str(1);
        result.content += std::string(it, rit->prefix().second);
        it = rit->suffix().first;

        json arguments;
        if (parse_json(it, end, arguments)) {
            if (!std::regex_search(it, end, match, close_regex)) {
                throw std::runtime_error("Malformed input, missing closing pattern: " + input);
            }
            it = match.suffix().first;
            result.tool_calls.push_back({name, arguments.is_string() ? arguments.get<std::string>() : arguments.dump(), /* id= */ ""});
        } else {
            if (allow_raw_python && name == "python") {
                result.tool_calls.push_back({name, json({{"code", std::string(it, end)}}).dump(), /* id= */ ""});
                break;
            }
            throw std::runtime_error("Failed to parse json tool call arguments: " + input);
        }
    }

    if (!result.tool_calls.empty()) {
        if (!string_strip(result.content).empty()) {
            LOG_WRN("Content found with tool calls: %s\n", result.content.c_str());
        }
        result.content = "";
    }
    return result;
}
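
// Illustrative input (hypothetical values), e.g. with the Llama 3.x regexes defined
// later in this file: the first regex group captures the function name, parse_json
// consumes the arguments object, and close_regex must match right after it:
//   {"name": "get_weather", "parameters": {"city": "Paris"}}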

static common_chat_tool_call process_tool_call(const json & tool_call) {
    const auto & arguments = tool_call.at("arguments");
    return {
        /* .name = */ tool_call.at("name"),
        /* .arguments = */ arguments.is_string() ? arguments.get<std::string>() : arguments.dump(),
        /* .id = */ tool_call.contains("id") ? tool_call.at("id") : "",
    };
}

static common_chat_msg parse_prefixed_json_tool_call_array(const std::string & input, const std::string & prefix, size_t rstrip_prefix = 0) {
    auto content_end = input.find(prefix);
    size_t tc_start = std::string::npos;

    common_chat_msg result;
    result.role = "assistant";
    if (content_end == std::string::npos) {
        result.content = input;
    } else {
        tc_start = content_end + prefix.size() - rstrip_prefix;
        result.content = input.substr(0, content_end);
        auto tool_calls = json::parse(input.substr(tc_start));
        for (const auto & tool_call : tool_calls) {
            result.tool_calls.emplace_back(process_tool_call(tool_call));
        }
    }
    return result;
}

static void foreach_function(const json & tools, const std::function<void(const json &)> & fn) {
    for (const auto & tool : tools) {
        if (!tool.contains("type") || tool.at("type") != "function" || !tool.contains("function")) {
            LOG_INF("Skipping tool without function: %s\n", tool.dump(2).c_str());
            continue;
        }
        fn(tool);
    }
}

static std::string apply(
    const common_chat_template & tmpl,
    const nlohmann::ordered_json & messages,
    const nlohmann::ordered_json & tools,
    bool add_generation_prompt,
    const nlohmann::ordered_json & extra_context = nlohmann::ordered_json())
{
    minja::chat_template_inputs tmpl_inputs;
    tmpl_inputs.messages = messages;
    tmpl_inputs.tools = tools;
    tmpl_inputs.add_generation_prompt = add_generation_prompt;
    tmpl_inputs.extra_context = extra_context;
    // TODO: add flag to control date/time, if only for testing purposes.
    // tmpl_inputs.now = std::chrono::system_clock::now();

    minja::chat_template_options tmpl_opts;
    // To avoid double BOS / EOS tokens, we're manually removing beginning / trailing tokens
    // instead of using `chat_template_options.use_bos_token = false`, since these tokens
    // may be needed inside the template / between messages too.
    auto result = tmpl.apply(tmpl_inputs, tmpl_opts);
    if (string_starts_with(result, tmpl.bos_token())) {
        result = result.substr(tmpl.bos_token().size());
    }
    if (string_ends_with(result, tmpl.eos_token())) {
        result = result.substr(0, result.size() - tmpl.eos_token().size());
    }
    return result;
}

static common_chat_params common_chat_params_init_generic(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    auto tool_call_schemas = json::array();
    foreach_function(inputs.tools, [&](const json & tool) {
        const auto & function = tool.at("function");
        auto tool_schema = json {
            {"type", "object"},
            {"properties", {
                {"name", {
                    {"type", "string"},
                    {"const", function.at("name")},
                }},
                {"arguments", function.at("parameters")},
            }},
            {"required", json::array({"name", "arguments"})},
        };
        if (function.contains("description")) {
            tool_schema["description"] = function.at("description");
        }
        if (inputs.parallel_tool_calls) {
            tool_schema.at("properties")["id"] = {
                {"type", "string"},
                {"minLength", 4},
            };
            tool_schema.at("required").push_back("id");
        }
        tool_call_schemas.emplace_back(tool_schema);
    });
    const auto tool_call =
        inputs.parallel_tool_calls
            ? json {
                {"type", "object"},
                {"properties", {
                    {"tool_calls", {
                        {"type", "array"},
                        {"items", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                            {"anyOf", tool_call_schemas},
                        }},
                        {"minItems", 1},
                    }},
                }},
                {"required", json::array({"tool_calls"})},
            }
            : json {
                {"type", "object"},
                {"properties", {
                    {"tool_call", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json {
                        {"anyOf", tool_call_schemas},
                    }},
                }},
                {"required", json::array({"tool_call"})},
            };
    const auto schema =
        inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED
            ? json {
                {"anyOf", json::array({
                    tool_call,
                    {
                        {"type", "object"},
                        {"properties", {
                            {"response", inputs.json_schema.is_null()
                                ? json {{"type", "string"}}
                                : inputs.json_schema
                            },
                        }},
                        {"required", json::array({"response"})},
                    },
                })}
            }
            : tool_call;

    data.grammar_lazy = false;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        builder.add_schema("root", schema);
    });

    auto tweaked_messages = common_chat_template::add_system(
        inputs.messages,
        "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request");

    data.prompt = apply(tmpl, tweaked_messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
    data.format = COMMON_CHAT_FORMAT_GENERIC;
    return data;
}

static common_chat_msg common_chat_parse_generic(const std::string & input) {
    json data = json::parse(input);
    common_chat_msg result;
    result.role = "assistant";
    if (data.contains("tool_calls")) {
        for (const auto & tool_call : data.at("tool_calls")) {
            result.tool_calls.push_back({
                tool_call.at("name"),
                tool_call.at("arguments").dump(),
                tool_call.contains("id") ? tool_call.at("id") : "",
            });
        }
    } else if (data.contains("tool_call")) {
        result.tool_calls.push_back({
            data.at("tool_call").at("name"),
            data.at("tool_call").at("arguments").dump(),
            /* id= */ "",
        });
    } else if (data.contains("response")) {
        const auto & response = data.at("response");
        result.content = response.is_string() ? response.get<std::string>() : response.dump(2);
    }
    return result;
}
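
// Illustrative model outputs accepted by this parser, as constrained by the generic
// schema above (hypothetical values):
//   {"tool_call": {"name": "get_weather", "arguments": {"city": "Paris"}}}
//   {"response": "It's sunny in Paris."}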

static common_chat_params common_chat_params_init_mistral_nemo(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        auto schemas = json::array();
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            schemas.push_back({
                {"type", "object"},
                {"properties", {
                    // Important note: the model is probably trained to take a JSON stringified arguments value.
                    // It's hard to constrain that for now (while reusing the JSON schema conversion), so we're just expecting a plain object.
                    {"name", {
                        {"type", "string"},
                        {"const", function.at("name")},
                    }},
                    {"arguments", function.at("parameters")},
                    {"id", {
                        {"type", "string"},
                        // Nemo's template expects a 9-character alphanumeric ID.
                        {"pattern", "^[a-zA-Z0-9]{9}$"},
                    }},
                }},
                {"required", json::array({"name", "arguments", "id"})},
            });
        });
        auto schema = json {
            {"type", "array"},
            {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
            {"minItems", 1},
        };
        if (!inputs.parallel_tool_calls) {
            schema["maxItems"] = 1;
        }
        builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema));
    });
    data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"});
    data.preserved_tokens = {
        "[TOOL_CALLS]",
    };
    data.prompt = apply(tmpl, inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
    data.format = COMMON_CHAT_FORMAT_MISTRAL_NEMO;
    return data;
}

static common_chat_msg common_chat_parse_mistral_nemo(const std::string & input) {
    return parse_prefixed_json_tool_call_array(input, "[TOOL_CALLS]");
}
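
// Illustrative Mistral Nemo output handled here (hypothetical values; the id matches
// the 9-character alphanumeric pattern from the schema above):
//   [TOOL_CALLS][{"name": "get_weather", "arguments": {"city": "Paris"}, "id": "aB3dEf9hI"}]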

static common_chat_params common_chat_params_init_command_r7b(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        auto schemas = json::array();
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            schemas.push_back({
                {"type", "object"},
                {"properties", {
                    {"tool_call_id", {
                        {"type", "string"},
                        // Command-R's template expects an integer string.
                        {"pattern", "^[0-9]{1,10}$"},
                    }},
                    {"tool_name", {
                        {"type", "string"},
                        {"const", function.at("name")},
                    }},
                    {"parameters", function.at("parameters")},
                }},
                {"required", json::array({"tool_call_id", "tool_name", "parameters"})},
            });
        });
        auto schema = json {
            {"type", "array"},
            {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
            {"minItems", 1},
        };
        if (!inputs.parallel_tool_calls) {
            schema["maxItems"] = 1;
        }
        builder.add_rule("root", "\"<|START_ACTION|>\" " + builder.add_schema("tool_calls", schema) + " \"<|END_ACTION|>\"");
    });
    data.grammar_triggers.push_back({
        COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
        "<|START_ACTION|>",
    });
    data.preserved_tokens = {
        "<|START_ACTION|>",
        "<|END_ACTION|>",
        "<|START_RESPONSE|>",
        "<|END_RESPONSE|>",
        "<|START_THINKING|>",
        "<|END_THINKING|>",
    };
    auto adjusted_messages = json::array();
    for (const auto & msg : inputs.messages) {
        auto has_reasoning_content = msg.contains("reasoning_content") && msg.at("reasoning_content").is_string();
        auto has_tool_calls = msg.contains("tool_calls") && msg.at("tool_calls").is_array();
        if (has_reasoning_content && has_tool_calls) {
            auto adjusted_message = msg;
            adjusted_message["tool_plan"] = msg.at("reasoning_content");
            adjusted_message.erase("reasoning_content");
            adjusted_messages.push_back(adjusted_message);
        } else {
            adjusted_messages.push_back(msg);
        }
    }
    data.prompt = apply(tmpl, adjusted_messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt, {});
    data.format = inputs.extract_reasoning ? COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING : COMMON_CHAT_FORMAT_COMMAND_R7B;
    return data;
}

static common_chat_msg common_chat_parse_command_r7b(const std::string & input, bool extract_reasoning) {
    static const std::regex thought_regex("(<\\|START_THINKING\\|>([\\s\\S]*?)<\\|END_THINKING\\|>)([\\s\\S]*)");
    static const std::regex action_regex("<\\|START_ACTION\\|>([\\s\\S]*?)<\\|END_ACTION\\|>");
    static const std::regex response_regex("(?:<\\|START_RESPONSE\\|>)?([\\s\\S]*?)<\\|END_RESPONSE\\|>");

    std::smatch match;

    common_chat_msg result;
    result.role = "assistant";

    std::string rest = input;

    if (std::regex_match(rest, match, thought_regex)) {
        if (extract_reasoning) {
            result.reasoning_content = match[2].str();
        } else if (!match[2].str().empty()) {
            // Let the unparsed thinking tags through in content only if their insides aren't empty.
            result.content = match[1].str();
        }
        rest = match[3].str();
    }
    if (std::regex_match(rest, match, action_regex)) {
        auto actions_str = match[1].str();
        auto actions = json::parse(actions_str);
        for (const auto & action : actions) {
            result.tool_calls.push_back({
                /* .name = */ action.at("tool_name"),
                /* .arguments = */ action.at("parameters").dump(),
                /* .id = */ action.at("tool_call_id"),
            });
        }
    } else if (std::regex_match(rest, match, response_regex)) {
        auto response = match[1].str();
        result.content += response;
    } else {
        result.content += rest;
    }
    return result;
}
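
// Illustrative Command R7B output handled here (hypothetical values; note that
// regex_match requires the action block to directly follow the thinking block):
//   <|START_THINKING|>I should call the tool.<|END_THINKING|><|START_ACTION|>[{"tool_call_id": "0", "tool_name": "get_weather", "parameters": {"city": "Paris"}}]<|END_ACTION|>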

static void expect_tool_parameters(const std::string & name, const json & parameters, const std::vector<std::string> & expected_properties) {
    if (!parameters.is_object() || !parameters.contains("type") || parameters.at("type") != "object" || !parameters.contains("properties") || !parameters.contains("required")) {
        throw std::runtime_error("Parameters of tool " + name + " must be an object w/ required properties");
    }
    const auto & parameters_properties = parameters.at("properties");
    const auto & parameters_required = parameters.at("required");
    for (const auto & prop : expected_properties) {
        if (!parameters_properties.contains(prop)) {
            throw std::runtime_error("Parameters of tool " + name + " is missing property: " + prop); // NOLINT
        }
        if (std::find(parameters_required.begin(), parameters_required.end(), json(prop)) == parameters_required.end()) {
            throw std::runtime_error("Parameters of tool " + name + " must have property marked as required: " + prop); // NOLINT
        }
    }
    if (parameters_properties.size() != expected_properties.size()) {
        throw std::runtime_error("Parameters of tool " + name + " must only have these properties: " + string_join(expected_properties, ", "));
    }
}

static common_chat_params common_chat_params_init_llama_3_1_tool_calls(const common_chat_template & tmpl, const struct templates_params & inputs, bool allow_python_tag_builtin_tools) {
    auto builtin_tools = json::array();
    common_chat_params data;
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        std::vector<std::string> tool_rules;

        auto handle_builtin_tool = [&](const std::string & name, const json & parameters) {
            if (name == "wolfram_alpha" || name == "web_search" || name == "brave_search") {
                // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py
                // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py
                expect_tool_parameters(name, parameters, {"query"});
            } else if (name == "python" || name == "code_interpreter") {
                // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py
                expect_tool_parameters(name, parameters, {"code"});
            } else {
                return false;
            }

            std::vector<std::string> kvs;
            for (const auto & [key, value] : parameters.at("properties").items()) {
                kvs.push_back("\"" + key + "=\" " + builder.add_schema(name + "-args-" + key, value)); // NOLINT
            }

            tool_rules.push_back(
                builder.add_rule(
                    name + "-call",
                    "\"<|python_tag|>" + name + ".call(\" " + string_join(kvs, " \", \" ") + " \")\""));
            builtin_tools.push_back(name);

            return true;
        };

        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            std::string name = function.at("name");
            auto parameters = function.at("parameters");
            builder.resolve_refs(parameters);

            // https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/remote/tool_runtime
            if (allow_python_tag_builtin_tools) {
                handle_builtin_tool(name, parameters);
            }
            tool_rules.push_back(
                builder.add_rule(
                    name + "-call",
                    "\"{\" space "
                    "( \"\\\"type\\\"\" space \":\" space \"\\\"function\\\"\" space \",\" space )? "
                    " \"\\\"name\\\"\" space \":\" space \"\\\"" + name + "\\\"\" space \",\" space "
                    " \"\\\"parameters\\\"\" space \":\" space " + builder.add_schema(name + "-args", parameters) + " "
                    "\"}\" space"));
        });
        // Small models may hallucinate function names so we match anything (*at the start*) that looks like the JSON of a function call, regardless of the name.
        data.grammar_triggers.push_back({
            COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START,
            "\\{\\s*(?:\"type\"\\s*:\\s*\"function\"\\s*,\\s*)?\"name\"\\s*:\\s*\"", // + name + "\"[\\s\\S]*",
        });
        if (!builtin_tools.empty()) {
            data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
            data.preserved_tokens.push_back("<|python_tag|>");
        }
        // Allow a few empty lines on top of the usual constrained json schema space rule.
        builder.add_rule("root", string_join(tool_rules, " | "));
    });
    data.additional_stops.push_back("<|eom_id|>");
    data.prompt = apply(tmpl, inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt, {
        {"tools_in_user_message", false},
        {"builtin_tools", builtin_tools.empty() ? json() : builtin_tools},
    });
    data.format = allow_python_tag_builtin_tools && !builtin_tools.empty()
        ? COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS
        : COMMON_CHAT_FORMAT_LLAMA_3_X;
    return data;
}

static common_chat_msg common_chat_parse_llama_3_1(const std::string & input, bool with_builtin_tools = false) {
    // TODO: tighten & simplify the parser, don't accept leading text context.
    static const std::regex function_regex(
        "\\s*\\{\\s*(?:\"type\"\\s*:\\s*\"function\"\\s*,\\s*)?\"name\"\\s*:\\s*\"([^\"]+)\"\\s*,\\s*\"parameters\"\\s*: ");
    static const std::regex close_regex("\\}\\s*");
    static const std::regex builtin_call_regex("<\\|python_tag\\|>\\s*([^.(]+)\\s*\\.\\s*call\\s*\\(\\s*([\\w]+)\\s*=\\s*([\\s\\S]*?)\\)");

    if (with_builtin_tools) {
        std::smatch match;
        if (std::regex_match(input, match, builtin_call_regex)) {
            try {
                auto name = match[1].str();
                auto arg_name = match[2].str();
                auto arg_value_str = match[3].str();
                auto arg_value = json::parse(arg_value_str);

                common_chat_msg msg;
                msg.role = "assistant";
                msg.tool_calls.push_back({
                    /* .name = */ name,
                    /* .arguments = */ (json {
                        {arg_name, arg_value},
                    }).dump(),
                    /* .id = */ "",
                });
                return msg;
            } catch (const std::exception & e) {
                LOG_WRN("Failed to parse builtin tool call arguments (%s): %s\n", e.what(), input.c_str());
            }
        }
    }
    return parse_json_tool_calls(input, std::nullopt, function_regex, close_regex);
}

static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    if (inputs.tools.is_array() && !inputs.tools.empty()) {
        data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED && inputs.json_schema.is_null();
        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            std::vector<std::string> tool_rules;
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                std::string name = function.at("name");
                auto parameters = function.at("parameters");
                builder.resolve_refs(parameters);
                tool_rules.push_back(builder.add_rule(name + "-call",
                    "\"<|tool▁call▁begin|>function<|tool▁sep|>" + name + "\\n"
                    "```json\\n\" " + builder.add_schema(name + "-args", parameters) + " "
                    "\"```<|tool▁call▁end|>\""));
            });
            // Distill Qwen 7B & 32B models seem confused re/ syntax of their tool call opening tag,
            // so we accept common variants (then it's all constrained)
            builder.add_rule("root",
                "( \"<|tool▁calls▁begin|>\" | \"<|tool_calls_begin|>\" | \"<|tool calls begin|>\" | \"<|tool\\\\_calls\\\\_begin|>\" ) "
                "(" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " "
                "\"<|tool▁calls▁end|>\""
                " space");
            data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|tool▁calls▁begin|>"});
            data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|tool_calls_begin|>"});
            data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|tool calls begin|>"});
            data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|tool\\_calls\\_begin|>"});
            data.preserved_tokens = {
                "<think>",
                "</think>",
                "<|tool▁calls▁begin|>",
                "<|tool▁call▁begin|>",
                "<|tool▁sep|>",
                "<|tool▁call▁end|>",
                "<|tool▁calls▁end|>",
            };
        });
    }
    auto prompt = apply(tmpl, inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);

    // Hacks to fix the official (broken) prompt.
    // It is advisable to use --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja instead,
    // until the official template is fixed.
    if (tmpl.source().find("{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}") != std::string::npos) {
        // Don't leave the chat dangling after tool results
        if (string_ends_with(prompt, "<|tool▁outputs▁end|>")) {
            prompt += "<|end▁of▁sentence|>";
            if (inputs.add_generation_prompt) {
                prompt += "<|Assistant|>";
            }
        }
        // Fix up tool call delta example added by Minja
        prompt = std::regex_replace(
            prompt,
            std::regex("(<|tool▁call▁end|>)[\\s\\r\\n]*(<|tool▁outputs▁begin|>|<|User|>)"),
            "$1<|tool▁calls▁end|><|end▁of▁sentence|>$2");
    }
    data.prompt = prompt;
    data.format = inputs.extract_reasoning ? COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING : COMMON_CHAT_FORMAT_DEEPSEEK_R1;
    return data;
}

static common_chat_msg handle_think_tag_prelude(const std::string & input, bool extract_reasoning, const std::function<common_chat_msg(const std::string &)> & rest_parser) {
    std::smatch match;
    static const std::regex reasoning_content_regex("((?:<think>)?([\\s\\S\\r\\n]*?)</think>)?([\\s\\S\\r\\n]*)");
    if (std::regex_match(input, match, reasoning_content_regex)) {
        auto rest = match[3].str();
        auto msg = rest_parser(rest);
        auto reasoning_content = string_strip(match[2].str());
        if (extract_reasoning) {
            msg.reasoning_content = reasoning_content;
        } else if (!reasoning_content.empty()) {
            std::ostringstream content;
            content << "<think>" << reasoning_content << "</think>" << msg.content;
            msg.content = content.str();
        }
        return msg;
    }
    return rest_parser(input);
}

static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input, bool extract_reasoning) {
    return handle_think_tag_prelude(input, extract_reasoning, [](const std::string & input) {
        static const std::regex function_regex("<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n");
        static const std::regex close_regex("```[\\s\\r\\n]*<|tool▁call▁end|>");
        static const std::regex tool_calls_regex("[\\s\\r\\n]*(?:<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>)([\\s\\S\\r\\n]*?)<|tool▁calls▁end|>");

        common_chat_msg msg;
        msg.role = "assistant";
        std::smatch match;
        if (std::regex_search(input, match, tool_calls_regex)) {
            auto tool_calls = match[1].str();
            auto msg2 = parse_json_tool_calls(tool_calls, std::nullopt, function_regex, close_regex);
            msg.tool_calls = std::move(msg2.tool_calls);
        } else {
            msg.content = input;
        }
        return msg;
    });
}
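
// Illustrative DeepSeek R1 output handled by this parser (hypothetical values):
//   <think>The user wants the weather.</think><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>get_weather
//   ```json
//   {"city": "Paris"}
//   ```<|tool▁call▁end|><|tool▁calls▁end|>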

static common_chat_params common_chat_params_init_firefunction_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
    LOG_DBG("%s\n", __func__);
    common_chat_params data;
    data.prompt = apply(tmpl, inputs.messages, /* tools= */ nullptr, inputs.add_generation_prompt, {
        {"datetime", "Jan 29 2025 13:00:00 GMT"},
        {"functions", json(inputs.tools.empty() ? "" : inputs.tools.dump(2))},
    });
    if (inputs.tools.is_array() && !inputs.tools.empty()) {
        data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            auto schemas = json::array();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                schemas.push_back({
                    {"type", "object"},
                    {"properties", {
                        {"name", {
                            {"type", "string"},
                            {"const", function.at("name")},
                        }},
                        {"arguments", function.at("parameters")},
                    }},
                    {"required", json::array({"name", "arguments", "id"})},
                });
            });
            auto schema = json {
                {"type", "array"},
                {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}},
                {"minItems", 1},
            };
            if (!inputs.parallel_tool_calls) {
                schema["maxItems"] = 1;
            }
            builder.add_rule("root", "\" functools\"? " + builder.add_schema("tool_calls", schema));
        });
        data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, " functools["});
        data.preserved_tokens = {
            " functools[",
        };
        data.format = COMMON_CHAT_FORMAT_FIREFUNCTION_V2;
    } else {
        data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
    }
    return data;
}

static common_chat_msg common_chat_parse_firefunction_v2(const std::string & input) {
    return parse_prefixed_json_tool_call_array(input, " functools[", /* rstrip_prefix= */ 1);
}
  1113. static common_chat_params common_chat_params_init_functionary_v3_2(const common_chat_template & tmpl, const struct templates_params & inputs) {
  1114. // >>>all\nlet's call functions>>>fn1\n{"arg1": 1...}\n>>>fn2\n{"arg1": 1...}...
  1115. // Using ">>>f1\n", ">>>f2\n"... as trigger words for the grammar
  1116. common_chat_params data;
  1117. data.prompt = apply(tmpl, inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
  1118. data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2;
  1119. if (inputs.tools.is_array() && !inputs.tools.empty()) {
  1120. data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
  1121. data.grammar = build_grammar([&](const common_grammar_builder & builder) {
  1122. std::vector<std::string> first_tool_rules;
  1123. std::vector<std::string> subsequent_tool_rules;
  1124. foreach_function(inputs.tools, [&](const json & tool) {
  1125. const auto & function = tool.at("function");
  1126. std::string name = function.at("name");
  1127. auto parameters = function.at("parameters");
  1128. builder.resolve_refs(parameters);
  1129. auto args_rule = builder.add_schema(name + "-args", parameters);
  1130. first_tool_rules.push_back(builder.add_rule(name + "-call", "( \"assistant<|end_header_id|>\\n\" )? \"" + name + "\\n\" " + args_rule));
  1131. subsequent_tool_rules.push_back(builder.add_rule(name + "-call2", "\">>>" + name + "\\n\" " + args_rule));
  1132. data.grammar_triggers.push_back({
  1133. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START,
  1134. regex_escape(name + "\n"),
  1135. });
  1136. data.grammar_triggers.push_back({
  1137. COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START,
  1138. regex_escape("assistant<|end_header_id|>\n" + name + "\n"),
  1139. });
  1140. data.grammar_triggers.push_back({
  1141. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  1142. regex_escape(">>>" + name + "\n"),
  1143. });
  1144. data.grammar_triggers.push_back({
  1145. COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
  1146. ">>>assistant<|end_header_id|>\n" + name,
  1147. });
  1148. });
  1149. data.preserved_tokens = {
  1150. "<|end_header_id|>",
  1151. };
  1152. auto first_rule = first_tool_rules.empty() ? "" : builder.add_rule("first_tool_call", string_join(first_tool_rules, " | ")) + " space";
  1153. if (inputs.parallel_tool_calls) {
  1154. auto subsequent_rule = builder.add_rule("subsequent_tool_call", string_join(subsequent_tool_rules, " | ")) + " space";
  1155. builder.add_rule("root", first_rule + " (" + subsequent_rule + ")*");
  1156. } else {
  1157. builder.add_rule("root", first_rule);
  1158. }
  1159. });
  1160. }
  1161. return data;
  1162. }
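
// Parses Functionary v3.2 output. Illustrative input (hypothetical tool):
//   all\nLet me check that for you.>>>get_weather\n{"location": "Paris"}
// An optional "all\n" section carries plain content; each ">>>name\n" section is a tool call.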
static common_chat_msg common_chat_parse_functionary_v3_2(const std::string & input) {
    // NOTE: the "|" characters are escaped so "<|end_header_id|>" matches literally rather than as regex alternation.
    static const std::regex function_regex(R"((?:>>>)?(?:assistant<\|end_header_id\|>\n)?(\w+)\n)");
    static const std::regex close_regex(R"($|(?=>>>))");
    std::string content;
    auto it = input.begin();
    const auto end = input.end();
    if (parse_literal(it, end, "all\n")) {
        std::smatch match;
        if (std::regex_search(it, end, match, function_regex)) {
            auto fun_it = match.prefix().second;
            content = std::string(it, fun_it);
            it = fun_it;
        } else {
            common_chat_msg res;
            res.role = "assistant";
            res.content = std::string(it, end);
            return res;
        }
    }
    // TODO: tighten & simplify.
    try {
        auto res = parse_json_tool_calls(std::string(it, end), std::nullopt, function_regex, close_regex, /* allow_raw_python= */ true);
        res.content = content + res.content;
        return res;
    } catch (const std::exception & e) {
        LOG_ERR("Failed to parse functionary v3.2 input: %s\n", e.what());
        common_chat_msg res;
        res.role = "assistant";
        res.content = input;
        return res;
    }
}

static common_chat_params common_chat_params_init_functionary_v3_1_llama_3_1(const common_chat_template & tmpl, const struct templates_params & inputs) {
    // https://github.com/MeetKai/functionary/blob/main/tests/prompt_test_v3-llama3.1.txt
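    // Tool-call syntax accepted here (illustrative, hypothetical tool):
    //   <function=get_weather>{"location": "Paris"}</function>
    // plus, when a python tool is declared, the raw form: <|python_tag|>print("hi")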
    common_chat_params data;
    json tools = inputs.tools.is_null() ? inputs.tools : json::array();
    std::string python_code_argument_name;
    auto has_raw_python = false;
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        std::vector<std::string> tool_rules;
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            const auto & parameters = function.at("parameters");
            std::string name = function.at("name");
            if (name == "python" || name == "ipython") {
                if (!parameters.contains("type")) {
                    throw std::runtime_error("Missing type in python tool");
                }
                has_raw_python = true;
                const auto & type = parameters.at("type");
                if (type == "object") {
                    auto properties = parameters.at("properties");
                    for (auto it = properties.begin(); it != properties.end(); ++it) {
                        if (it.value().at("type") == "string") {
                            if (!python_code_argument_name.empty()) {
                                throw std::runtime_error("Multiple string arguments found in python tool");
                            }
                            python_code_argument_name = it.key();
                        }
                    }
                    if (python_code_argument_name.empty()) {
                        throw std::runtime_error("No string argument found in python tool");
                    }
                } else if (type != "string") {
                    throw std::runtime_error("Invalid type in python tool: " + type.dump());
                }
            }
            tool_rules.push_back(builder.add_rule(name + "-call", "\"<function=" + name + ">\" " + builder.add_schema(name + "-args", parameters) + " \"</function>\" space"));
        });
        if (has_raw_python) {
            tool_rules.push_back(builder.add_rule("python-call", "\"<|python_tag|>\" .*"));
            data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<|python_tag|>"});
            data.preserved_tokens.push_back("<|python_tag|>");
        }
        auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | ")) + " space";
        builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call);
        data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<function="});
    });
    data.prompt = apply(tmpl, inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
    // TODO: if (has_raw_python)
    data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1;
    return data;
}

static common_chat_msg common_chat_parse_functionary_v3_1_llama_3_1(const std::string & input) {
    // This version of Functionary still supports the llama 3.1 tool call format for the python tool.
    static const std::regex python_tag_regex(R"(<\|python_tag\|>([\s\S\n]*)$)");
    std::smatch match;
    if (std::regex_search(input, match, python_tag_regex)) {
        auto code = match[1].str();
        common_chat_msg msg;
        msg.role = "assistant";
        msg.content = match.prefix().str();
        msg.tool_calls.push_back({
            /* .name = */ "python",
            /* .arguments = */ (json {{"code", code}}).dump(),
            /* .id = */ "",
        });
        return msg;
    }
    static const std::regex function_regex(R"(<function=(\w+)>)");
    static const std::regex close_regex(R"(</function>)");
    // TODO: tighten & simplify.
    return parse_json_tool_calls(input, std::nullopt, function_regex, close_regex);
}

static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    // (content)?(<tool_call>{"name": "foo", "arguments": {"a": 1}}</tool_call>)*
    data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
    data.grammar = build_grammar([&](const common_grammar_builder & builder) {
        std::vector<std::string> tool_rules;
        std::vector<std::string> tool_call_alts;
        foreach_function(inputs.tools, [&](const json & tool) {
            const auto & function = tool.at("function");
            std::string name = function.at("name");
            auto parameters = function.at("parameters");
            builder.resolve_refs(parameters);
            tool_rules.push_back(builder.add_schema(name + "-call", {
                {"type", "object"},
                {"properties", json {
                    {"name", json {{"const", name}}},
                    {"arguments", parameters},
                }},
                {"required", json::array({"name", "arguments"})},
            }));
            tool_call_alts.push_back(builder.add_rule(
                name + "-function-tag",
                "\"<function\" ( \"=" + name + "\" | \" name=\\\"" + name + "\\\"\" ) \">\" space " +
                builder.add_schema(name + "-args", parameters) + " "
                "\"</function>\" space"));
            data.grammar_triggers.push_back({
                COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
                "<function=" + name + ">",
            });
            auto escaped_name = regex_escape(name);
            data.grammar_triggers.push_back({
                COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
                "<function\\s+name\\s*=\\s*\"" + escaped_name + "\"",
            });
        });
        auto any_tool_call = builder.add_rule("any_tool_call", "( " + string_join(tool_rules, " | ") + " ) space");
        std::vector<std::string> alt_tags {
            any_tool_call,
            "\"<tool_call>\" space "     + any_tool_call + " \"</tool_call>\"",
            // The rest is just to accommodate common "good bad" outputs.
            "\"<function_call>\" space " + any_tool_call + " \"</function_call>\"",
            "\"<response>\" space "      + any_tool_call + " \"</response>\"",
            "\"<tools>\" space "         + any_tool_call + " \"</tools>\"",
            "\"<json>\" space "          + any_tool_call + " \"</json>\"",
            "\"<xml>\" space "           + any_tool_call + " \"</xml>\"",
            "\"<JSON>\" space "          + any_tool_call + " \"</JSON>\"",
        };
        auto wrappable_tool_call = builder.add_rule("wrappable_tool_call", "( " + string_join(alt_tags, " | ") + " ) space");
        tool_call_alts.push_back(wrappable_tool_call);
        tool_call_alts.push_back(
            "( \"```\\n\" | \"```json\\n\" | \"```xml\\n\" ) space " + wrappable_tool_call + " space \"```\" space ");
        auto tool_call = builder.add_rule("tool_call", string_join(tool_call_alts, " | "));
        builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call);
        data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<tool_call>"});
        data.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<function"});
        // Trigger on some common known "good bad" outputs (only from the start of the message and only when a JSON object follows, to limit false positives).
        data.grammar_triggers.push_back({
            COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START,
            "(?:```(?:json|xml)?\n\\s*)?(?:<function_call>|<tools>|<xml><json>|<response>)?\\s*\\{\\s*\"", //name\"\\s*:\\s*\"" + escaped_name + "\"",
        });
        data.preserved_tokens = {
            "<think>",
            "</think>",
            "<tool_call>",
            "</tool_call>",
            "<function",
            "<tools>",
            "</tools>",
            "<response>",
            "</response>",
            "<function_call>",
            "</function_call>",
            "<json>",
            "</json>",
            "<JSON>",
            "</JSON>",
            "```",
            "```json",
            "```xml",
        };
    });
    data.prompt = apply(tmpl, inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
    data.format = inputs.extract_reasoning ? COMMON_CHAT_FORMAT_HERMES_2_PRO_EXTRACT_REASONING : COMMON_CHAT_FORMAT_HERMES_2_PRO;
    return data;
}
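
// Parses Hermes 2 Pro style output, tolerating several observed wrappings, e.g.
// (hypothetical tool):
//   <tool_call>{"name": "get_weather", "arguments": {"location": "Paris"}}</tool_call>
//   <function=get_weather>{"location": "Paris"}</function>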
static common_chat_msg common_chat_parse_hermes_2_pro(const std::string & input, bool extract_reasoning) {
    return handle_think_tag_prelude(input, extract_reasoning, [](const std::string & input) {
        static const std::regex open_regex(
            "(?:"
            "(```(?:xml|json)?\\n\\s*)?"          // match 1 (block_start)
            "(<tool_call>"                        // match 2 (open_tag)
            "|<function_call>"
            "|<tool>"
            "|<tools>"
            "|<response>"
            "|<json>"
            "|<xml>"
            "|<JSON>"
            ")?"
            "(\\s*\\{\\s*\"name\"\\s*:[\\s\\S]*)" // match 3 (named tool call + rest)
            ")"
            "|"
            "(?:<function=([^>]+)>"               // match 4 (function name)
            "|<function name=\"([^\"]+)\">)"      // match 5 (function name again)
            "([\\s\\S]*)"                         // match 6 (function arguments + rest)
        );
        try {
            common_chat_msg msg;
            msg.role = "assistant";
            std::string::const_iterator it = input.begin();
            const std::string::const_iterator end = input.end();
            std::smatch match;
            while (it != end) {
                if (std::regex_search(it, end, match, open_regex)) {
                    // Add content before the match
                    msg.content += std::string(it, match[0].first);
                    auto block_start = match[1].str();
                    std::string block_end = block_start.empty() ? "" : "```";
                    auto open_tag = match[2].str();
                    std::string close_tag;
                    if (match[3].matched) {
                        close_tag = open_tag.empty() ? "" : "</" + open_tag.substr(1);
                        auto json_it = match[3].first;
                        json tool_call;
                        if (parse_json(json_it, end, tool_call) && tool_call.contains("name") && tool_call.contains("arguments")) {
                            msg.tool_calls.emplace_back(process_tool_call(tool_call));
                            it = json_it;  // Move iterator past parsed JSON
                            // Handle close tags
                            consume_spaces(it, end);
                            if (!close_tag.empty() && !parse_literal(it, end, close_tag)) {
                                throw std::runtime_error("Failed to parse closing tag");
                            }
                            consume_spaces(it, end);
                            if (!block_end.empty() && !parse_literal(it, end, block_end)) {
                                throw std::runtime_error("Failed to parse block end");
                            }
                            consume_spaces(it, end);
                        } else {
                            // Not a valid tool call, treat as content
                            msg.content += std::string(match[0].first, match[0].second);
                            it = match[0].second;
                        }
                    } else {
                        auto function_name = match[4].str();
                        if (function_name.empty()) {
                            function_name = match[5].str();
                        }
                        GGML_ASSERT(!function_name.empty());
                        close_tag = "</function>";
                        // Start parsing from after the opening tags
                        auto json_it = match[6].first;
                        json arguments;
                        if (parse_json(json_it, end, arguments)) {
                            msg.tool_calls.emplace_back(process_tool_call({
                                {"name", function_name},
                                {"arguments", arguments},
                            }));
                            it = json_it;  // Move iterator past parsed JSON
                            // Handle close tags
                            consume_spaces(it, end);
                            if (!close_tag.empty() && !parse_literal(it, end, close_tag)) {
                                throw std::runtime_error("Failed to parse closing tag");
                            }
                            consume_spaces(it, end);
                            if (!block_end.empty() && !parse_literal(it, end, block_end)) {
                                throw std::runtime_error("Failed to parse block end");
                            }
                            consume_spaces(it, end);
                        } else {
                            // Not a valid tool call, treat as content
                            msg.content += std::string(match[0].first, match[0].second);
                            it = match[0].second;
                        }
                    }
                } else {
                    // Add remaining content
                    msg.content += std::string(it, end);
                    break;
                }
            }
            return msg;
        } catch (const std::exception & e) {
            LOG_ERR("Failed to parse hermes 2 pro input: %s\n", e.what());
            common_chat_msg msg;
            msg.role = "assistant";
            msg.content = input;
            return msg;
        }
    });
}
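
// Plain content handler: no tool-call grammar is produced; output may still be
// constrained by a user-supplied JSON schema or grammar.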
static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.prompt = apply(tmpl, inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt);
    data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
    data.grammar_lazy = false;
    if (!inputs.json_schema.is_null()) {
        if (!inputs.grammar.empty()) {
            throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
        }
        data.grammar = json_schema_to_grammar(inputs.json_schema);
    } else {
        data.grammar = inputs.grammar;
    }
    return data;
}
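
// Jinja template route: inspects the template source for distinctive markers
// and dispatches to the matching handler.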
static common_chat_params common_chat_templates_apply_jinja(
    const struct common_chat_templates * tmpls,
    const struct common_chat_templates_inputs & inputs)
{
    templates_params params;
    params.tools = common_chat_tools_to_json_oaicompat<json>(inputs.tools);
    const auto & tmpl = params.tools.is_array() && tmpls->template_tool_use
        ? *tmpls->template_tool_use
        : *tmpls->template_default;
    const auto & src = tmpl.source();
    const auto & caps = tmpl.original_caps();
    params.messages = common_chat_msgs_to_json_oaicompat<json>(inputs.messages, /* concat_text= */ !caps.requires_typed_content);
    params.add_generation_prompt = inputs.add_generation_prompt;
    params.extract_reasoning = inputs.extract_reasoning;
    params.tool_choice = inputs.tool_choice;
    params.grammar = inputs.grammar;
    if (!inputs.json_schema.empty()) {
        params.json_schema = json::parse(inputs.json_schema);
    }
    if (inputs.parallel_tool_calls && !caps.supports_parallel_tool_calls) {
        LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n");
        params.parallel_tool_calls = false;
    } else {
        params.parallel_tool_calls = inputs.parallel_tool_calls;
    }
    if (params.tools.is_array()) {
        if (params.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && !params.grammar.empty()) {
            throw std::runtime_error("Cannot specify grammar with tools");
        }
        if (caps.supports_tool_calls && !caps.supports_tools) {
            LOG_WRN("Template supports tool calls but does not natively describe tools. The fallback behaviour used may produce bad results, inspect prompt w/ --verbose & consider overriding the template.\n");
        }
    }
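
    // Handler dispatch below runs from most to least specific; each template
    // family is recognized by a marker string in its source.
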
    // DeepSeek R1: use handler in all cases except json schema (thinking / tools).
    if (src.find("<|tool▁calls▁begin|>") != std::string::npos && params.json_schema.is_null()) {
        return common_chat_params_init_deepseek_r1(tmpl, params);
    }

    // Command R7B: use handler in all cases except json schema (thinking / tools).
    if (src.find("<|END_THINKING|><|START_ACTION|>") != std::string::npos && params.json_schema.is_null()) {
        return common_chat_params_init_command_r7b(tmpl, params);
    }

    // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
    if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) {
        return common_chat_params_init_hermes_2_pro(tmpl, params);
    }

    // Use generic handler when mixing tools + JSON schema.
    // TODO: support that mix in handlers below.
    if (params.tools.is_array() && params.json_schema.is_object()) {
        return common_chat_params_init_generic(tmpl, params);
    }

    // Functionary prepends "all\n" to plain content outputs, so we use its handler in all cases.
    if (src.find(">>>all") != std::string::npos) {
        return common_chat_params_init_functionary_v3_2(tmpl, params);
    }

    // Firefunction v2 requires datetime and functions in the context even w/o tools, so we also use its handler in all cases.
    if (src.find(" functools[") != std::string::npos) {
        return common_chat_params_init_firefunction_v2(tmpl, params);
    }

    // Plain handler (no tools)
    if (params.tools.is_null() || inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_NONE) {
        return common_chat_params_init_without_tools(tmpl, params);
    }

    // Functionary v3.1 (w/ tools)
    if (src.find("<|start_header_id|>") != std::string::npos
        && src.find("<function=") != std::string::npos) {
        return common_chat_params_init_functionary_v3_1_llama_3_1(tmpl, params);
    }

    // Llama 3.1, 3.2, 3.3 (w/ tools)
    if (src.find("<|start_header_id|>ipython<|end_header_id|>") != std::string::npos) {
        auto allow_python_tag_builtin_tools = src.find("<|python_tag|>") != std::string::npos;
        return common_chat_params_init_llama_3_1_tool_calls(tmpl, params, allow_python_tag_builtin_tools);
    }

    // Mistral Nemo (w/ tools)
    if (src.find("[TOOL_CALLS]") != std::string::npos) {
        return common_chat_params_init_mistral_nemo(tmpl, params);
    }

    // Generic fallback
    return common_chat_params_init_generic(tmpl, params);
}

// Legacy template route (adhoc C++ implementation of known templates), forward to llama_chat_apply_template.
static common_chat_params common_chat_templates_apply_legacy(
    const struct common_chat_templates * tmpls,
    const struct common_chat_templates_inputs & inputs)
{
    int alloc_size = 0;
    std::vector<llama_chat_message> chat;
    std::vector<std::string> contents;
    for (const auto & msg : inputs.messages) {
        auto content = msg.content;
        for (const auto & part : msg.content_parts) {
            if (part.type != "text") {
                LOG_WRN("Ignoring non-text content part: %s\n", part.type.c_str());
                continue;
            }
            if (!content.empty()) {
                content += "\n";
            }
            content += part.text;
        }
        contents.emplace_back(std::move(content));
    }
    for (size_t i = 0; i < contents.size(); ++i) {
        const auto & msg = inputs.messages[i];
        const auto & content = contents[i];
        chat.push_back({msg.role.c_str(), content.c_str()});
        alloc_size += (msg.role.size() + content.size()) * 1.25;
    }
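
    // alloc_size over-reserves by ~25% so most prompts fit in a single
    // llama_chat_apply_template() call; the buffer is resized below otherwise.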
    std::vector<char> buf(alloc_size);

    // run the first time to get the total output length
    const auto & src = tmpls->template_default->source();
    int32_t res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());

    // error: chat template is not supported
    if (res < 0) {
        // if the custom "tmpl" is not supported, we throw an error
        // this is somewhat redundant (by design), since we cannot be sure the user validated the custom template with llama_chat_verify_template()
        throw std::runtime_error("this custom template is not supported");
    }

    // if it turns out that our buffer is too small, we resize it
    if ((size_t) res > buf.size()) {
        buf.resize(res);
        res = llama_chat_apply_template(src.c_str(), chat.data(), chat.size(), inputs.add_generation_prompt, buf.data(), buf.size());
    }

    common_chat_params params;
    params.prompt = std::string(buf.data(), res);
    if (!inputs.json_schema.empty()) {
        params.grammar = json_schema_to_grammar(json::parse(inputs.json_schema));
    } else {
        params.grammar = inputs.grammar;
    }
    return params;
}

common_chat_params common_chat_templates_apply(
    const struct common_chat_templates * tmpls,
    const struct common_chat_templates_inputs & inputs)
{
    GGML_ASSERT(tmpls != nullptr);
    return inputs.use_jinja
        ? common_chat_templates_apply_jinja(tmpls, inputs)
        : common_chat_templates_apply_legacy(tmpls, inputs);
}

static common_chat_msg common_chat_parse_content_only(const std::string & input) {
    common_chat_msg msg;
    msg.role = "assistant";
    msg.content = input;
    return msg;
}
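
// Routes raw model output to the parser for the chat format chosen by the
// corresponding common_chat_params_init_* handler.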
common_chat_msg common_chat_parse(const std::string & input, common_chat_format format) {
    switch (format) {
        case COMMON_CHAT_FORMAT_CONTENT_ONLY:
            return common_chat_parse_content_only(input);
        case COMMON_CHAT_FORMAT_GENERIC:
            return common_chat_parse_generic(input);
        case COMMON_CHAT_FORMAT_MISTRAL_NEMO:
            return common_chat_parse_mistral_nemo(input);
        case COMMON_CHAT_FORMAT_LLAMA_3_X:
            return common_chat_parse_llama_3_1(input);
        case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS:
            return common_chat_parse_llama_3_1(input, /* with_builtin_tools= */ true);
        case COMMON_CHAT_FORMAT_DEEPSEEK_R1:
            return common_chat_parse_deepseek_r1(input, /* extract_reasoning= */ false);
        case COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING:
            return common_chat_parse_deepseek_r1(input, /* extract_reasoning= */ true);
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2:
            return common_chat_parse_functionary_v3_2(input);
        case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1:
            return common_chat_parse_functionary_v3_1_llama_3_1(input);
        case COMMON_CHAT_FORMAT_HERMES_2_PRO:
            return common_chat_parse_hermes_2_pro(input, /* extract_reasoning= */ false);
        case COMMON_CHAT_FORMAT_HERMES_2_PRO_EXTRACT_REASONING:
            return common_chat_parse_hermes_2_pro(input, /* extract_reasoning= */ true);
        case COMMON_CHAT_FORMAT_FIREFUNCTION_V2:
            return common_chat_parse_firefunction_v2(input);
        case COMMON_CHAT_FORMAT_COMMAND_R7B:
            return common_chat_parse_command_r7b(input, /* extract_reasoning= */ false);
        case COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING:
            return common_chat_parse_command_r7b(input, /* extract_reasoning= */ true);
        default:
            throw std::runtime_error("Unsupported format: " + common_chat_format_name(format));
    }
}