// chat-template.hpp
/*
    Copyright 2024 Google LLC

    Use of this source code is governed by an MIT-style
    license that can be found in the LICENSE file or at
    https://opensource.org/licenses/MIT.
*/
// SPDX-License-Identifier: MIT
#pragma once

#include "minja.hpp"

#include <json.hpp>

#include <cstdio>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

using json = nlohmann::ordered_json;
  14. namespace minja {
  15. struct chat_template_caps {
  16. bool supports_tools = false;
  17. bool supports_tool_calls = false;
  18. bool supports_tool_responses = false;
  19. bool supports_system_role = false;
  20. bool supports_parallel_tool_calls = false;
  21. bool supports_tool_call_id = false;
  22. // meta-llama/Llama-3.1-8B-Instruct expects arguments to be an object.
  23. // Most other templates (and OpenAI's API) expect the arguments object to be stringified.
  24. bool requires_object_arguments = false;
  25. // CohereForAI/c4ai-command-r-plus simple variant
  26. bool requires_non_null_content = false;
  27. // MiniMaxAI/MiniMax-Text-01 special
  28. bool requires_typed_content = false;
  29. };
  30. class chat_template {
  31. private:
  32. chat_template_caps caps_;
  33. std::string source_;
  34. std::string bos_token_;
  35. std::string eos_token_;
  36. std::shared_ptr<minja::TemplateNode> template_root_;
  37. std::string try_raw_render(
  38. const nlohmann::ordered_json & messages,
  39. const nlohmann::ordered_json & tools,
  40. bool add_generation_prompt,
  41. const nlohmann::ordered_json & extra_context = nlohmann::ordered_json()) const
  42. {
  43. try {
  44. auto prompt = apply(messages, tools, add_generation_prompt, extra_context, /* adjust_inputs= */ false);
  45. // fprintf(stderr, "try_raw_render: %s\n", prompt.c_str());
  46. return prompt;
  47. } catch (const std::exception & e) {
  48. // fprintf(stderr, "try_raw_render error: %s\n", e.what());
  49. return "";
  50. }
  51. }
  52. public:
  53. chat_template(const std::string & source, const std::string & bos_token, const std::string & eos_token)
  54. : source_(source), bos_token_(bos_token), eos_token_(eos_token)
  55. {
  56. template_root_ = minja::Parser::parse(source_, {
  57. /* .trim_blocks = */ true,
  58. /* .lstrip_blocks = */ true,
  59. /* .keep_trailing_newline = */ false,
  60. });
  61. auto contains = [](const std::string & haystack, const std::string & needle) {
  62. return haystack.find(needle) != std::string::npos;
  63. };
  64. const std::string user_needle = "<User Needle>";
  65. const std::string sys_needle = "<System Needle>";
  66. const json dummy_str_user_msg = {{"role", "user"}, {"content", user_needle}};
  67. const json dummy_typed_user_msg = {{"role", "user"}, {"content", json::array({{{"type", "text"}, {"text", user_needle}}})}};
  68. caps_.requires_typed_content =
  69. !contains(try_raw_render(json::array({dummy_str_user_msg}), {}, false), user_needle)
  70. && contains(try_raw_render(json::array({dummy_typed_user_msg}), {}, false), user_needle);
  71. const auto dummy_user_msg = caps_.requires_typed_content
  72. ? dummy_typed_user_msg
  73. : dummy_str_user_msg;
  74. const json needle_system_msg = {
  75. {"role", "system"},
  76. {"content", caps_.requires_typed_content ? json::array({{{"type", "text"}, {"text", sys_needle}}}) : json(sys_needle)},
  77. };
  78. caps_.supports_system_role = contains(try_raw_render({needle_system_msg, dummy_user_msg,}, {}, false), sys_needle);
  79. auto out = try_raw_render(json::array({
  80. dummy_user_msg
  81. }), json::array({
  82. {
  83. {"name", "some_tool"},
  84. {"type", "function"},
  85. {"function", {
  86. {"name", "some_tool"},
  87. {"description", "Some tool."},
  88. {"parameters", {
  89. {"type", "object"},
  90. {"properties", {
  91. {"arg", {
  92. {"type", "string"},
  93. {"description", "Some argument."},
  94. }},
  95. }},
  96. {"required", json::array({ "arg" })},
  97. }},
  98. }},
  99. },
  100. }), false);
  101. caps_.supports_tools = contains(out, "some_tool");
  102. auto make_tool_calls_msg = [&](const json & tool_calls) {
  103. return json {
  104. {"role", "assistant"},
  105. {"content", nullptr},
  106. {"tool_calls", tool_calls},
  107. };
  108. };
  109. auto make_tool_call = [](const std::string & tool_name, const json & arguments) {
  110. return json {
  111. {"id", "call_1___"},
  112. {"type", "function"},
  113. {"function", {
  114. {"arguments", arguments},
  115. {"name", tool_name},
  116. }},
  117. };
  118. };
  119. const json dummy_args_obj {{"argument_needle", "print('Hello, World!')"}};
  120. // Note: the arguments are rendered in both cases, but may be double-escaped, which we don't want.
  121. out = try_raw_render(json::array({
  122. dummy_user_msg,
  123. make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj.dump())})),
  124. }), {}, false);
  125. auto tool_call_renders_str_arguments = contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':");
  126. out = try_raw_render(json::array({
  127. dummy_user_msg,
  128. make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj)})),
  129. }), {}, false);
  130. auto tool_call_renders_obj_arguments = contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':");
  131. caps_.supports_tool_calls = tool_call_renders_str_arguments || tool_call_renders_obj_arguments;
  132. caps_.requires_object_arguments = !tool_call_renders_str_arguments && tool_call_renders_obj_arguments;
  133. auto out_empty = try_raw_render(json::array({dummy_user_msg, {{"role", "assistant"}, {"content", ""}}}), {}, false);
  134. auto out_null = try_raw_render(json::array({dummy_user_msg, {{"role", "assistant"}, {"content", nullptr}}}), {}, false);
  135. caps_.requires_non_null_content = contains(out_empty, user_needle) && !contains(out_null, user_needle);
  136. if (caps_.supports_tool_calls) {
  137. auto dummy_args = caps_.requires_object_arguments ? dummy_args_obj : json(dummy_args_obj.dump());
  138. auto tc1 = make_tool_call("test_tool1", dummy_args);
  139. auto tc2 = make_tool_call("test_tool2", dummy_args);
  140. auto out = try_raw_render(json::array({
  141. dummy_user_msg,
  142. make_tool_calls_msg(json::array({tc1, tc2})),
  143. }), {}, false);
  144. caps_.supports_parallel_tool_calls = contains(out, "test_tool1") && contains(out, "test_tool2");
  145. out = try_raw_render(json::array({
  146. dummy_user_msg,
  147. make_tool_calls_msg(json::array({tc1})),
  148. {
  149. {"role", "tool"},
  150. {"name", "test_tool1"},
  151. {"content", "Some response!"},
  152. {"tool_call_id", "call_911_"},
  153. }
  154. }), {}, false);
  155. caps_.supports_tool_responses = contains(out, "Some response!");
  156. caps_.supports_tool_call_id = contains(out, "call_911_");
  157. }
  158. }
  159. const std::string & source() const { return source_; }
  160. const std::string & bos_token() const { return bos_token_; }
  161. const std::string & eos_token() const { return eos_token_; }
  162. const chat_template_caps & original_caps() const { return caps_; }
  163. std::string apply(
  164. const nlohmann::ordered_json & messages,
  165. const nlohmann::ordered_json & tools,
  166. bool add_generation_prompt,
  167. const nlohmann::ordered_json & extra_context = nlohmann::ordered_json(),
  168. bool adjust_inputs = true) const
  169. {
  170. json actual_messages;
  171. auto needs_adjustments = adjust_inputs && (false
  172. || !caps_.supports_system_role
  173. || !caps_.supports_tools
  174. || !caps_.supports_tool_responses
  175. || !caps_.supports_tool_calls
  176. || caps_.requires_object_arguments
  177. || caps_.requires_typed_content
  178. );
  179. if (needs_adjustments) {
  180. actual_messages = json::array();
  181. auto add_message = [&](const json & msg) {
  182. if (caps_.requires_typed_content && msg.contains("content") && !msg.at("content").is_null() && msg.at("content").is_string()) {
  183. actual_messages.push_back({
  184. {"role", msg.at("role")},
  185. {"content", {{
  186. {"type", "text"},
  187. {"text", msg.at("content")},
  188. }}},
  189. });
  190. } else {
  191. actual_messages.push_back(msg);
  192. }
  193. };
  194. std::string pending_system;
  195. auto flush_sys = [&]() {
  196. if (!pending_system.empty()) {
  197. add_message({
  198. {"role", "user"},
  199. {"content", pending_system},
  200. });
  201. pending_system.clear();
  202. }
  203. };
  204. auto needs_tools_in_system = !tools.is_null() && tools.size() > 0 && !caps_.supports_tools;
  205. for (const auto & message_ : needs_tools_in_system ? add_system(messages, "Available tools: " + tools.dump(2)) : messages) {
  206. auto message = message_;
  207. if (!message.contains("role") || !message.contains("content")) {
  208. throw std::runtime_error("message must have 'role' and 'content' fields: " + message.dump());
  209. }
  210. std::string role = message.at("role");
  211. if (message.contains("tool_calls")) {
  212. if (caps_.requires_object_arguments || !caps_.supports_tool_calls) {
  213. for (auto & tool_call : message.at("tool_calls")) {
  214. if (tool_call["type"] == "function") {
  215. auto & function = tool_call.at("function");
  216. auto & arguments = function.at("arguments");
  217. if (arguments.is_string()) {
  218. try {
  219. arguments = json::parse(arguments.get<std::string>());
  220. } catch (const std::exception & ecvt) {
  221. fprintf(stderr, "Failed to parse arguments: %s\n", ecvt.what());
  222. }
  223. }
  224. }
  225. }
  226. }
  227. if (!caps_.supports_tool_calls) {
  228. auto content = message.at("content");
  229. auto tool_calls = json::array();
  230. for (const auto & tool_call : message.at("tool_calls")) {
  231. if (tool_call.at("type") != "function") {
  232. continue;
  233. }
  234. const auto & function = tool_call.at("function");
  235. auto tc = json {
  236. {"name", function.at("name")},
  237. {"arguments", function.at("arguments")},
  238. };
  239. if (tool_call.contains("id")) {
  240. tc["id"] = tool_call["id"];
  241. }
  242. tool_calls.push_back(tc);
  243. }
  244. auto obj = json {
  245. {"tool_calls", tool_calls},
  246. };
  247. if (!content.is_null() && content != "") {
  248. obj["content"] = content;
  249. }
  250. message["content"] = obj.dump(2);
  251. message.erase("tool_calls");
  252. }
  253. }
  254. if (!caps_.supports_tool_responses && role == "tool") {
  255. message["role"] = "user";
  256. auto obj = json {
  257. {"tool_response", {
  258. {"content", message.at("content")},
  259. }},
  260. };
  261. if (message.contains("name")) {
  262. obj["tool_response"]["name"] = message.at("name");
  263. }
  264. if (message.contains("tool_call_id")) {
  265. obj["tool_response"]["tool_call_id"] = message.at("tool_call_id");
  266. }
  267. message["content"] = obj.dump(2);
  268. message.erase("name");
  269. }
  270. if (!message["content"].is_null() && !caps_.supports_system_role) {
  271. std::string content = message.at("content");
  272. if (role == "system") {
  273. if (!pending_system.empty()) pending_system += "\n";
  274. pending_system += content;
  275. continue;
  276. } else {
  277. if (role == "user") {
  278. if (!pending_system.empty()) {
  279. message["content"] = pending_system + (content.empty() ? "" : "\n" + content);
  280. pending_system.clear();
  281. }
  282. } else {
  283. flush_sys();
  284. }
  285. }
  286. }
  287. add_message(message);
  288. }
  289. if (!caps_.supports_system_role) {
  290. flush_sys();
  291. }
  292. } else {
  293. actual_messages = messages;
  294. }
  295. auto context = minja::Context::make(json({
  296. {"messages", actual_messages},
  297. {"add_generation_prompt", add_generation_prompt},
  298. {"bos_token", bos_token_},
  299. {"eos_token", eos_token_},
  300. }));
  301. if (!tools.is_null()) {
  302. auto tools_val = minja::Value(tools);
  303. context->set("tools", tools_val);
  304. }
  305. if (!extra_context.is_null()) {
  306. for (auto & kv : extra_context.items()) {
  307. minja::Value val(kv.value());
  308. context->set(kv.key(), val);
  309. }
  310. }
  311. auto ret = template_root_->render(context);
  312. // fprintf(stderr, "actual_messages: %s\n", actual_messages.dump(2).c_str());
  313. // fprintf(stderr, "apply: %s\n\n", ret.c_str());
  314. return ret;
  315. }
  316. static nlohmann::ordered_json add_system(const nlohmann::ordered_json & messages, const std::string & system_prompt) {
  317. json messages_with_system = messages;
  318. if (messages_with_system.size() > 0 && messages_with_system[0].at("role") == "system") {
  319. std::string existing_system = messages_with_system.at(0).at("content");
  320. messages_with_system[0] = json {
  321. {"role", "system"},
  322. {"content", existing_system + "\n" + system_prompt},
  323. };
  324. } else {
  325. messages_with_system.insert(messages_with_system.begin(), json {
  326. {"role", "system"},
  327. {"content", system_prompt},
  328. });
  329. }
  330. return messages_with_system;
  331. }
  332. };
  333. } // namespace minja