chat-template.hpp 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268
  1. /*
  2. Copyright 2024 Google LLC
  3. Use of this source code is governed by an MIT-style
  4. license that can be found in the LICENSE file or at
  5. https://opensource.org/licenses/MIT.
  6. */
  7. // SPDX-License-Identifier: MIT
#pragma once

#include "minja.hpp"
#include <json.hpp>

#include <exception>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

using json = nlohmann::ordered_json;
  14. namespace minja {
  15. class chat_template {
  16. public:
  17. private:
  18. bool supports_tools_ = true;
  19. // Meta-Llama-3.1-8B-Instruct's template expects arguments to be an object.
  20. // Most other templates (and OpenAI's API) expect the arguments object to be stringified.
  21. bool requires_object_arguments_ = false;
  22. bool requires_typed_content_ = false;
  23. bool supports_system_role_ = true;
  24. bool supports_parallel_tool_calls_ = false;
  25. std::string source_;
  26. std::string bos_token_;
  27. std::string eos_token_;
  28. std::shared_ptr<minja::TemplateNode> template_root_;
  29. std::string try_raw_render(
  30. const nlohmann::ordered_json & messages,
  31. const nlohmann::ordered_json & tools,
  32. bool add_generation_prompt,
  33. const nlohmann::ordered_json & extra_context = nlohmann::ordered_json()) const
  34. {
  35. try {
  36. auto prompt = apply(messages, tools, add_generation_prompt, extra_context, /* adjust_inputs= */ false);
  37. // fprintf(stderr, "Prompt: %s\n", prompt.c_str());
  38. return prompt;
  39. } catch (const std::exception & e) {
  40. // fprintf(stderr, "Error: %s\n", e.what());
  41. return "";
  42. }
  43. }
  44. public:
  45. chat_template(const std::string & source, const std::string & bos_token, const std::string & eos_token)
  46. : source_(source), bos_token_(bos_token), eos_token_(eos_token)
  47. {
  48. template_root_ = minja::Parser::parse(source_, {
  49. /* .trim_blocks = */ true,
  50. /* .lstrip_blocks = */ true,
  51. /* .keep_trailing_newline = */ false,
  52. });
  53. supports_tools_ = source.find("tools") != std::string::npos;
  54. auto renders_string_arguments =
  55. try_raw_render({
  56. {
  57. {"role", "user"},
  58. {"content", "Hey"}
  59. },
  60. {
  61. {"role", "assistant"},
  62. {"tool_calls", json::array({
  63. {
  64. {"id", "call_1___"},
  65. {"type", "function"},
  66. {"function", {
  67. {"arguments", "{\"code\": \"print('Hello, World!')\"}"},
  68. {"name", "ipython"},
  69. }},
  70. },
  71. })},
  72. }
  73. }, {}, false).find("{\"code\": \"print") != std::string::npos;
  74. if (!renders_string_arguments) {
  75. auto renders_object_arguments =
  76. try_raw_render({
  77. {
  78. {"role", "user"},
  79. {"content", "Hey"}
  80. },
  81. {
  82. {"role", "assistant"},
  83. {"tool_calls", json::array({
  84. {
  85. {"id", "call_1___"},
  86. {"type", "function"},
  87. {"function", {
  88. {"arguments", {
  89. {"code", "print('Hello, World!')"},
  90. }},
  91. {"name", "ipython"},
  92. }},
  93. },
  94. })},
  95. }
  96. }, {}, false).find("{\"code\": \"print") != std::string::npos;
  97. requires_object_arguments_ = renders_object_arguments;
  98. }
  99. supports_parallel_tool_calls_ = source.find("tool_call_id") != std::string::npos;
  100. supports_system_role_ = try_raw_render({
  101. {{"role", "system"}, {"content", "<System Needle>"}},
  102. {{"role", "user"}, {"content", "Hey"}}
  103. }, {}, false).find("<System Needle>") != std::string::npos;
  104. requires_typed_content_ = try_raw_render({{{"role", "user"}, {"content", "Hey"}}}, {}, false).find("Hey") == std::string::npos
  105. && try_raw_render({{{"role", "user"}, {"content", {{{"type", "text"}, {"text", "Hey"}}}}}}, {}, false).find("Hey") != std::string::npos;
  106. }
  107. const std::string & source() const { return source_; }
  108. const std::string & bos_token() const { return bos_token_; }
  109. const std::string & eos_token() const { return eos_token_; }
  110. bool supports_tools() const { return supports_tools_; }
  111. bool supports_parallel_tool_calls() const { return supports_parallel_tool_calls_; }
  112. std::string apply(
  113. const nlohmann::ordered_json & messages,
  114. const nlohmann::ordered_json & tools,
  115. bool add_generation_prompt,
  116. const nlohmann::ordered_json & extra_context = nlohmann::ordered_json(),
  117. bool adjust_inputs = true) const
  118. {
  119. json actual_messages;
  120. // First, "fix" messages so they have a chance to be rendered correctly by the template
  121. if (adjust_inputs && (requires_object_arguments_ || !supports_system_role_ || !supports_tools_ || requires_typed_content_)) {
  122. actual_messages = json::array();
  123. auto add_message = [&](const json & msg) {
  124. if (requires_typed_content_ && msg.contains("content") && !msg.at("content").is_null() && msg.at("content").is_string()) {
  125. actual_messages.push_back({
  126. {"role", msg.at("role")},
  127. {"content", {{
  128. {"type", "text"},
  129. {"text", msg.at("content")},
  130. }}},
  131. });
  132. } else {
  133. actual_messages.push_back(msg);
  134. }
  135. };
  136. std::string pending_system;
  137. auto flush_sys = [&]() {
  138. if (!pending_system.empty()) {
  139. add_message({
  140. {"role", "user"},
  141. {"content", pending_system},
  142. });
  143. pending_system.clear();
  144. }
  145. };
  146. for (const auto & message_ : messages) {
  147. auto message = message_;
  148. if (!message.contains("role") || !message.contains("content")) {
  149. throw std::runtime_error("message must have 'role' and 'content' fields: " + message.dump());
  150. }
  151. std::string role = message.at("role");
  152. if (message.contains("tool_calls")) {
  153. if (requires_object_arguments_ || !supports_tools_) {
  154. for (auto & tool_call : message.at("tool_calls")) {
  155. if (tool_call["type"] == "function") {
  156. auto & function = tool_call.at("function");
  157. std::string arguments = function.at("arguments");
  158. function["arguments"] = json::parse(arguments);
  159. }
  160. }
  161. }
  162. if (!supports_tools_) {
  163. auto content = message.at("content");
  164. auto tool_calls = json::array();
  165. for (const auto & tool_call : message.at("tool_calls")) {
  166. if (tool_call.at("type") != "function") {
  167. continue;
  168. }
  169. const auto & function = tool_call.at("function");
  170. auto tc = json {
  171. {"name", function.at("name")},
  172. {"arguments", function.at("arguments")},
  173. };
  174. if (tool_call.contains("id")) {
  175. tc["id"] = tool_call["id"];
  176. }
  177. tool_calls.push_back(tc);
  178. }
  179. auto obj = json {
  180. {"tool_calls", tool_calls},
  181. };
  182. if (!content.is_null() && content != "") {
  183. obj["content"] = content;
  184. }
  185. message["content"] = obj.dump(2);
  186. message.erase("tool_calls");
  187. }
  188. }
  189. if (!supports_tools_ && role == "tool") {
  190. message["role"] = "user";
  191. auto obj = json {
  192. {"tool_response", {
  193. {"tool", message.at("name")},
  194. {"content", message.at("content")},
  195. }},
  196. };
  197. if (message.contains("tool_call_id")) {
  198. obj["tool_response"]["tool_call_id"] = message.at("tool_call_id");
  199. }
  200. message["content"] = obj.dump(2);
  201. message.erase("name");
  202. }
  203. if (!message["content"].is_null() && !supports_system_role_) {
  204. std::string content = message.at("content");
  205. if (role == "system") {
  206. if (!pending_system.empty()) pending_system += "\n";
  207. pending_system += content;
  208. continue;
  209. } else {
  210. if (role == "user") {
  211. if (!pending_system.empty()) {
  212. message["content"] = pending_system + (content.empty() ? "" : "\n" + content);
  213. pending_system.clear();
  214. }
  215. } else {
  216. flush_sys();
  217. }
  218. }
  219. }
  220. add_message(message);
  221. }
  222. flush_sys();
  223. } else {
  224. actual_messages = messages;
  225. }
  226. auto context = minja::Context::make(json({
  227. {"messages", actual_messages},
  228. {"add_generation_prompt", add_generation_prompt},
  229. {"bos_token", bos_token_},
  230. {"eos_token", eos_token_},
  231. }));
  232. if (!tools.is_null()) {
  233. auto tools_val = minja::Value(tools);
  234. context->set("tools", tools_val);
  235. }
  236. if (!extra_context.is_null()) {
  237. for (auto & kv : extra_context.items()) {
  238. minja::Value val(kv.value());
  239. context->set(kv.key(), val);
  240. }
  241. }
  242. return template_root_->render(context);
  243. }
  244. };
  245. } // namespace minja