/*
    Copyright 2024 Google LLC

    Use of this source code is governed by an MIT-style
    license that can be found in the LICENSE file or at
    https://opensource.org/licenses/MIT.
*/
// SPDX-License-Identifier: MIT
#pragma once

#include "minja.hpp"

#include <chrono>
#include <cstddef>
#include <cstdio>
#include <ctime>
#include <exception>
#include <iomanip>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

#include <nlohmann/json.hpp>

using json = nlohmann::ordered_json;
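
// Usage sketch (illustrative only; the template source string, BOS/EOS tokens and
// message contents below are made-up placeholders, not values shipped with this header):
//
//     minja::chat_template tmpl(my_template_source, /* bos_token= */ "<s>", /* eos_token= */ "</s>");
//
//     minja::chat_template_inputs in;
//     in.messages = json::array({
//         {{"role", "system"}, {"content", "You are a helpful assistant."}},
//         {{"role", "user"},   {"content", "Hello!"}},
//     });
//     in.add_generation_prompt = true;
//
//     std::string prompt = tmpl.apply(in);  // polyfills enabled by default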

namespace minja {

struct chat_template_caps {
    bool supports_tools = false;
    bool supports_tool_calls = false;
    bool supports_tool_responses = false;
    bool supports_system_role = false;
    bool supports_parallel_tool_calls = false;
    bool supports_tool_call_id = false;
    // meta-llama/Llama-3.1-8B-Instruct expects arguments to be an object.
    // Most other templates (and OpenAI's API) expect the arguments object to be stringified.
    bool requires_object_arguments = false;
    // CohereForAI/c4ai-command-r-plus simple variant
    bool requires_non_null_content = false;
    // MiniMaxAI/MiniMax-Text-01 special
    bool requires_typed_content = false;
};

struct chat_template_inputs {
    nlohmann::ordered_json messages;
    nlohmann::ordered_json tools;
    bool add_generation_prompt = true;
    nlohmann::ordered_json extra_context;
    std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
};

struct chat_template_options {
    bool apply_polyfills = true;

    bool use_bos_token = true;
    bool use_eos_token = true;
    bool define_strftime_now = true;

    bool polyfill_tools = true;
    bool polyfill_tool_call_examples = true;
    bool polyfill_tool_calls = true;
    bool polyfill_tool_responses = true;
    bool polyfill_system_role = true;
    bool polyfill_object_arguments = true;
    bool polyfill_typed_content = true;
};
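
// Example (illustrative, continuing the sketch above): render the template verbatim,
// with every polyfill disabled, so unsupported features surface in the raw output
// (or as template errors) instead of being rewritten:
//
//     minja::chat_template_options opts;
//     opts.apply_polyfills = false;
//     std::string raw = tmpl.apply(in, opts);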

class chat_template {

  private:
    chat_template_caps caps_;
    std::string source_;
    std::string bos_token_;
    std::string eos_token_;
    std::shared_ptr<minja::TemplateNode> template_root_;
    std::string tool_call_example_;

    std::string try_raw_render(
        const nlohmann::ordered_json & messages,
        const nlohmann::ordered_json & tools,
        bool add_generation_prompt,
        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json()) const
    {
        try {
            chat_template_inputs inputs;
            inputs.messages = messages;
            inputs.tools = tools;
            inputs.add_generation_prompt = add_generation_prompt;
            inputs.extra_context = extra_context;
            // Use fixed date for tests
            inputs.now = std::chrono::system_clock::from_time_t(0);

            chat_template_options opts;
            opts.apply_polyfills = false;

            auto prompt = apply(inputs, opts);
            // fprintf(stderr, "try_raw_render: %s\n", prompt.c_str());
            return prompt;
        } catch (const std::exception & e) {
            // fprintf(stderr, "try_raw_render error: %s\n", e.what());
            return "";
        }
    }
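
    // Note on the constructor below: template capabilities are probed empirically by
    // rendering small synthetic conversations containing needle strings and checking
    // whether the needles survive in the raw output; each chat_template_caps flag is
    // derived from one such probe. (Summary of the code below, not a specification.)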

  public:
    chat_template(const std::string & source, const std::string & bos_token, const std::string & eos_token)
        : source_(source), bos_token_(bos_token), eos_token_(eos_token)
    {
        template_root_ = minja::Parser::parse(source_, {
            /* .trim_blocks = */ true,
            /* .lstrip_blocks = */ true,
            /* .keep_trailing_newline = */ false,
        });

        auto contains = [](const std::string & haystack, const std::string & needle) {
            return haystack.find(needle) != std::string::npos;
        };

        const std::string user_needle = "<User Needle>";
        const std::string sys_needle = "<System Needle>";
        const json dummy_str_user_msg = {{"role", "user"}, {"content", user_needle}};
        const json dummy_typed_user_msg = {{"role", "user"}, {"content", json::array({{{"type", "text"}, {"text", user_needle}}})}};

        caps_.requires_typed_content =
            !contains(try_raw_render(json::array({dummy_str_user_msg}), {}, false), user_needle)
            && contains(try_raw_render(json::array({dummy_typed_user_msg}), {}, false), user_needle);

        const auto dummy_user_msg = caps_.requires_typed_content
            ? dummy_typed_user_msg
            : dummy_str_user_msg;
        const json needle_system_msg = {
            {"role", "system"},
            {"content", caps_.requires_typed_content ? json::array({{{"type", "text"}, {"text", sys_needle}}}) : json(sys_needle)},
        };

        caps_.supports_system_role = contains(try_raw_render({needle_system_msg, dummy_user_msg,}, {}, false), sys_needle);

        auto out = try_raw_render(json::array({
            dummy_user_msg
        }), json::array({
            {
                {"name", "some_tool"},
                {"type", "function"},
                {"function", {
                    {"name", "some_tool"},
                    {"description", "Some tool."},
                    {"parameters", {
                        {"type", "object"},
                        {"properties", {
                            {"arg", {
                                {"type", "string"},
                                {"description", "Some argument."},
                            }},
                        }},
                        {"required", json::array({ "arg" })},
                    }},
                }},
            },
        }), false);
        caps_.supports_tools = contains(out, "some_tool");

        const auto render_with_content = [&](const json & content) {
            const json assistant_msg {{"role", "assistant"}, {"content", content}};
            // Render two assistant messages, as some templates (e.g. QwQ-32B) handle the content
            // differently depending on whether it is the last message or not
            // (they strip the <think> tag from all but the last message).
            return try_raw_render(json::array({dummy_user_msg, assistant_msg, dummy_user_msg, assistant_msg}), {}, false);
        };
        auto out_empty = render_with_content("");
        auto out_null = render_with_content(json());
        caps_.requires_non_null_content = contains(out_empty, user_needle) && !contains(out_null, user_needle);

        json j_null;
        auto make_tool_calls_msg = [&](const json & tool_calls) {
            return json {
                {"role", "assistant"},
                {"content", caps_.requires_non_null_content ? "" : j_null},
                {"tool_calls", tool_calls},
            };
        };
        auto make_tool_call = [](const std::string & tool_name, const json & arguments) {
            return json {
                {"id", "call_1___"},
                {"type", "function"},
                {"function", {
                    {"arguments", arguments},
                    {"name", tool_name},
                }},
            };
        };

        const json dummy_args_obj {{"argument_needle", "print('Hello, World!')"}};

        // Note: the arguments are rendered in both cases, but may be double-escaped, which we don't want.
        out = try_raw_render(json::array({
            dummy_user_msg,
            make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj.dump())})),
        }), {}, false);
        auto tool_call_renders_str_arguments = contains(out, "<parameter=argument_needle>") || contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':");
        out = try_raw_render(json::array({
            dummy_user_msg,
            make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj)})),
        }), {}, false);
        auto tool_call_renders_obj_arguments = contains(out, "<parameter=argument_needle>") || contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':");

        caps_.supports_tool_calls = tool_call_renders_str_arguments || tool_call_renders_obj_arguments;
        caps_.requires_object_arguments = !tool_call_renders_str_arguments && tool_call_renders_obj_arguments;

        if (caps_.supports_tool_calls) {
            auto dummy_args = caps_.requires_object_arguments ? dummy_args_obj : json(dummy_args_obj.dump());
            auto tc1 = make_tool_call("test_tool1", dummy_args);
            auto tc2 = make_tool_call("test_tool2", dummy_args);
            auto out = try_raw_render(json::array({
                dummy_user_msg,
                make_tool_calls_msg(json::array({tc1, tc2})),
            }), {}, false);
            caps_.supports_parallel_tool_calls = contains(out, "test_tool1") && contains(out, "test_tool2");

            out = try_raw_render(json::array({
                dummy_user_msg,
                make_tool_calls_msg(json::array({tc1})),
                {
                    {"role", "tool"},
                    {"name", "test_tool1"},
                    {"content", "Some response!"},
                    {"tool_call_id", "call_911_"},
                }
            }), {}, false);
            caps_.supports_tool_responses = contains(out, "Some response!");
            caps_.supports_tool_call_id = contains(out, "call_911_");
        }

        try {
            if (!caps_.supports_tools) {
                const json user_msg {
                    {"role", "user"},
                    {"content", "Hey"},
                };
                const json args {
                    {"arg1", "some_value"},
                };
                const json tool_call_msg {
                    {"role", "assistant"},
                    {"content", caps_.requires_non_null_content ? "" : j_null},
                    {"tool_calls", json::array({
                        {
                            // TODO: detect if requires numerical id or fixed length == 6 like Nemo
                            {"id", "call_1___"},
                            {"type", "function"},
                            {"function", {
                                {"name", "tool_name"},
                                {"arguments", (caps_.requires_object_arguments ? args : json(minja::Value(args).dump(-1, /* to_json= */ true)))},
                            }},
                        },
                    })},
                };
                std::string prefix, full;
                {
                    chat_template_inputs inputs;
                    inputs.messages = json::array({user_msg});
                    inputs.add_generation_prompt = true;
                    prefix = apply(inputs);
                }
                {
                    chat_template_inputs inputs;
                    inputs.messages = json::array({user_msg, tool_call_msg});
                    inputs.add_generation_prompt = false;
                    full = apply(inputs);
                }
                auto eos_pos_last = full.rfind(eos_token_);
                if (eos_pos_last == prefix.size() - eos_token_.size() ||
                        (full[full.size() - 1] == '\n' && (eos_pos_last == full.size() - eos_token_.size() - 1))) {
                    full = full.substr(0, eos_pos_last);
                }
                size_t common_prefix_length = 0;
                for (size_t i = 0; i < prefix.size() && i < full.size(); ++i) {
                    if (prefix[i] != full[i]) {
                        break;
                    }
                    if (prefix[i] == '<') {
                        // DeepSeek R1's template (as of 20250209) adds a trailing <think> if add_generation_prompt,
                        // but it removes thinking tags for past messages.
                        // The prefix and full strings diverge at <think> vs. <|tool▁calls▁begin|>,
                        // so we avoid consuming the leading '<'.
                        continue;
                    }
                    common_prefix_length = i + 1;
                }
                auto example = full.substr(common_prefix_length);
                if (example.find("tool_name") == std::string::npos && example.find("some_value") == std::string::npos) {
                    fprintf(stderr, "Failed to infer a tool call example (possible template bug)\n");
                } else {
                    tool_call_example_ = example;
                }
            }
        } catch (const std::exception & e) {
            fprintf(stderr, "Failed to generate tool call example: %s\n", e.what());
        }
    }

    const std::string & source() const { return source_; }
    const std::string & bos_token() const { return bos_token_; }
    const std::string & eos_token() const { return eos_token_; }
    const chat_template_caps & original_caps() const { return caps_; }

    // Deprecated; please use the overload taking chat_template_inputs and chat_template_options.
    std::string apply(
        const nlohmann::ordered_json & messages,
        const nlohmann::ordered_json & tools,
        bool add_generation_prompt,
        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json(),
        bool apply_polyfills = true)
    {
        fprintf(stderr, "[%s] Deprecated!\n", __func__);
        chat_template_inputs inputs;
        inputs.messages = messages;
        inputs.tools = tools;
        inputs.add_generation_prompt = add_generation_prompt;
        inputs.extra_context = extra_context;
        inputs.now = std::chrono::system_clock::now();

        chat_template_options opts;
        opts.apply_polyfills = apply_polyfills;

        return apply(inputs, opts);
    }

    std::string apply(
        const chat_template_inputs & inputs,
        const chat_template_options & opts = chat_template_options()) const
    {
        json actual_messages;

        auto has_tools = inputs.tools.is_array() && !inputs.tools.empty();
        auto has_tool_calls = false;
        auto has_tool_responses = false;
        auto has_string_content = false;
        for (const auto & message : inputs.messages) {
            if (message.contains("tool_calls") && !message["tool_calls"].is_null()) {
                has_tool_calls = true;
            }
            if (message.contains("role") && message["role"] == "tool") {
                has_tool_responses = true;
            }
            if (message.contains("content") && message["content"].is_string()) {
                has_string_content = true;
            }
        }

        auto polyfill_system_role = opts.polyfill_system_role && !caps_.supports_system_role;
        auto polyfill_tools = opts.polyfill_tools && has_tools && !caps_.supports_tools;
        auto polyfill_tool_call_example = polyfill_tools && opts.polyfill_tool_call_examples;
        auto polyfill_tool_calls = opts.polyfill_tool_calls && has_tool_calls && !caps_.supports_tool_calls;
        auto polyfill_tool_responses = opts.polyfill_tool_responses && has_tool_responses && !caps_.supports_tool_responses;
        auto polyfill_object_arguments = opts.polyfill_object_arguments && has_tool_calls && caps_.requires_object_arguments;
        auto polyfill_typed_content = opts.polyfill_typed_content && has_string_content && caps_.requires_typed_content;

        auto needs_polyfills = opts.apply_polyfills && (false
            || polyfill_system_role
            || polyfill_tools
            || polyfill_tool_calls
            || polyfill_tool_responses
            || polyfill_object_arguments
            || polyfill_typed_content
        );
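
        // Illustration of one polyfill (made-up values, behaviour read off the code below):
        // for a template without a native system role, leading system content is folded
        // into the next user message so the template only sees roles it supports, e.g.
        //   [{"role":"system","content":"Be terse."}, {"role":"user","content":"Hi"}]
        //     -> [{"role":"user","content":"Be terse.\nHi"}]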
        if (needs_polyfills) {
            actual_messages = json::array();

            auto add_message = [&](const json & msg) {
                if (polyfill_typed_content && msg.contains("content") && !msg.at("content").is_null() && msg.at("content").is_string()) {
                    actual_messages.push_back({
                        {"role", msg.at("role")},
                        {"content", {{
                            {"type", "text"},
                            {"text", msg.at("content")},
                        }}},
                    });
                } else {
                    actual_messages.push_back(msg);
                }
            };

            std::string pending_system;
            auto flush_sys = [&]() {
                if (!pending_system.empty()) {
                    add_message({
                        {"role", "user"},
                        {"content", pending_system},
                    });
                    pending_system.clear();
                }
            };

            json adjusted_messages;
            if (polyfill_tools) {
                adjusted_messages = add_system(inputs.messages,
                    "You can call any of the following tools to satisfy the user's requests: " + minja::Value(inputs.tools).dump(2, /* to_json= */ true) +
                    (!polyfill_tool_call_example || tool_call_example_.empty() ? "" : "\n\nExample tool call syntax:\n\n" + tool_call_example_ + "\n\n"));
            } else {
                adjusted_messages = inputs.messages;
            }

            for (const auto & message_ : adjusted_messages) {
                auto message = message_;
                if (!message.contains("role") || (!message.contains("content") && !message.contains("tool_calls"))) {
                    throw std::runtime_error("message must have 'role' and one of 'content' or 'tool_calls' fields: " + message.dump());
                }
                std::string role = message.at("role");

                if (message.contains("tool_calls")) {
                    if (polyfill_object_arguments || polyfill_tool_calls) {
                        for (auto & tool_call : message.at("tool_calls")) {
                            if (tool_call["type"] == "function") {
                                auto & function = tool_call.at("function");
                                auto & arguments = function.at("arguments");
                                if (arguments.is_string()) {
                                    try {
                                        arguments = json::parse(arguments.get<std::string>());
                                    } catch (const std::exception & ecvt) {
                                        fprintf(stderr, "Failed to parse arguments: %s\n", ecvt.what());
                                    }
                                }
                            }
                        }
                    }
                    if (polyfill_tool_calls) {
                        auto tool_calls = json::array();
                        for (const auto & tool_call : message.at("tool_calls")) {
                            if (tool_call.at("type") != "function") {
                                continue;
                            }
                            const auto & function = tool_call.at("function");
                            auto tc = json {
                                {"name", function.at("name")},
                                {"arguments", function.at("arguments")},
                            };
                            if (tool_call.contains("id")) {
                                tc["id"] = tool_call["id"];
                            }
                            tool_calls.push_back(tc);
                        }
                        auto obj = json {
                            {"tool_calls", tool_calls},
                        };
                        if (message.contains("content")) {
                            auto content = message.at("content");
                            if (!content.is_null() && !content.empty()) {
                                obj["content"] = content;
                            }
                        }
                        message["content"] = obj.dump(2);
                        message.erase("tool_calls");
                    }
                }
                if (polyfill_tool_responses && role == "tool") {
                    message["role"] = "user";
                    auto obj = json {
                        {"tool_response", json::object()},
                    };
                    if (message.contains("name")) {
                        obj["tool_response"]["tool"] = message.at("name");
                    }
                    obj["tool_response"]["content"] = message.at("content");
                    if (message.contains("tool_call_id")) {
                        obj["tool_response"]["tool_call_id"] = message.at("tool_call_id");
                    }
                    message["content"] = obj.dump(2);
                    message.erase("name");
                }

                if (!message["content"].is_null() && polyfill_system_role) {
                    std::string content = message.at("content");
                    if (role == "system") {
                        if (!pending_system.empty()) pending_system += "\n";
                        pending_system += content;
                        continue;
                    } else {
                        if (role == "user") {
                            if (!pending_system.empty()) {
                                message["content"] = pending_system + (content.empty() ? "" : "\n" + content);
                                pending_system.clear();
                            }
                        } else {
                            flush_sys();
                        }
                    }
                }
                add_message(message);
            }
            flush_sys();
        } else {
            actual_messages = inputs.messages;
        }

        auto context = minja::Context::make(json({
            {"messages", actual_messages},
            {"add_generation_prompt", inputs.add_generation_prompt},
        }));
        context->set("bos_token", opts.use_bos_token ? bos_token_ : "");
        context->set("eos_token", opts.use_eos_token ? eos_token_ : "");
        if (opts.define_strftime_now) {
            auto now = inputs.now;
            context->set("strftime_now", Value::callable([now](const std::shared_ptr<minja::Context> &, minja::ArgumentsValue & args) {
                args.expectArgs("strftime_now", {1, 1}, {0, 0});
                auto format = args.args[0].get<std::string>();

                auto time = std::chrono::system_clock::to_time_t(now);
                auto local_time = *std::localtime(&time);
                std::ostringstream ss;
                ss << std::put_time(&local_time, format.c_str());
                return ss.str();
            }));
        }
        if (!inputs.tools.is_null()) {
            context->set("tools", minja::Value(inputs.tools));
        }
        if (!inputs.extra_context.is_null()) {
            for (auto & kv : inputs.extra_context.items()) {
                context->set(kv.key(), minja::Value(kv.value()));
            }
        }

        auto ret = template_root_->render(context);
        // fprintf(stderr, "actual_messages: %s\n", actual_messages.dump(2).c_str());
        // fprintf(stderr, "apply: %s\n\n", ret.c_str());
        return ret;
    }
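
    // add_system (below) prepends or merges a system prompt. Illustrative example
    // (made-up values, behaviour read off the implementation):
    //   add_system([{"role":"user","content":"Hi"}], "Use the provided tools.")
    //     -> [{"role":"system","content":"Use the provided tools."},
    //         {"role":"user","content":"Hi"}]
    // If the first message is already a system message, the prompt is appended to its
    // content (separated by a blank line) instead of inserting a second system message.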
    static nlohmann::ordered_json add_system(const nlohmann::ordered_json & messages, const std::string & system_prompt) {
        json messages_with_system = messages;

        if (!messages_with_system.empty() && messages_with_system[0].at("role") == "system") {
            std::string existing_system = messages_with_system.at(0).at("content");
            messages_with_system[0] = json {
                {"role", "system"},
                {"content", existing_system + "\n\n" + system_prompt},
            };
        } else {
            messages_with_system.insert(messages_with_system.begin(), json {
                {"role", "system"},
                {"content", system_prompt},
            });
        }
        return messages_with_system;
    }
};

} // namespace minja