// llama-chat.cpp
#include "llama-chat.h"
#include "llama.h"

#include <algorithm>
#include <cctype>
#include <cstdint>
#include <map>
#include <sstream>
#include <string>
#include <vector>

// in C++20, u8 string literals have type char8_t, so cast them back to const char *
#if __cplusplus >= 202000L
#define LU8(x) (const char*)(u8##x)
#else
#define LU8(x) u8##x
#endif

// trim whitespace from the beginning and end of a string
static std::string trim(const std::string & str) {
    size_t start = 0;
    size_t end = str.size();
    // cast to unsigned char: passing a negative char to isspace() is undefined behavior
    while (start < end && isspace(static_cast<unsigned char>(str[start]))) {
        start += 1;
    }
    while (end > start && isspace(static_cast<unsigned char>(str[end - 1]))) {
        end -= 1;
    }
    return str.substr(start, end - start);
}

static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
    { "chatml",            LLM_CHAT_TEMPLATE_CHATML            },
    { "llama2",            LLM_CHAT_TEMPLATE_LLAMA_2           },
    { "llama2-sys",        LLM_CHAT_TEMPLATE_LLAMA_2_SYS       },
    { "llama2-sys-bos",    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS   },
    { "llama2-sys-strip",  LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP },
    { "mistral-v1",        LLM_CHAT_TEMPLATE_MISTRAL_V1        },
    { "mistral-v3",        LLM_CHAT_TEMPLATE_MISTRAL_V3        },
    { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
    { "mistral-v7",        LLM_CHAT_TEMPLATE_MISTRAL_V7        },
    { "phi3",              LLM_CHAT_TEMPLATE_PHI_3             },
    { "phi4",              LLM_CHAT_TEMPLATE_PHI_4             },
    { "falcon3",           LLM_CHAT_TEMPLATE_FALCON_3          },
    { "zephyr",            LLM_CHAT_TEMPLATE_ZEPHYR            },
    { "monarch",           LLM_CHAT_TEMPLATE_MONARCH           },
    { "gemma",             LLM_CHAT_TEMPLATE_GEMMA             },
    { "orion",             LLM_CHAT_TEMPLATE_ORION             },
    { "openchat",          LLM_CHAT_TEMPLATE_OPENCHAT          },
    { "vicuna",            LLM_CHAT_TEMPLATE_VICUNA            },
    { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
    { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
    { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
    { "deepseek3",         LLM_CHAT_TEMPLATE_DEEPSEEK_3        },
    { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
    { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
    { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGML_3         },
    { "chatglm4",          LLM_CHAT_TEMPLATE_CHATGML_4         },
    { "glmedge",           LLM_CHAT_TEMPLATE_GLMEDGE           },
    { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM           },
    { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3          },
    { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD        },
    { "granite",           LLM_CHAT_TEMPLATE_GRANITE           },
    { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT          },
    { "megrez",            LLM_CHAT_TEMPLATE_MEGREZ            },
};
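
// NOTE: the short names above can be supplied in place of a full Jinja
// template string; llm_chat_detect_template() below tries this exact-name
// lookup first and only then falls back to substring heuristics.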

// throws std::out_of_range if `name` is not one of the templates listed above
llm_chat_template llm_chat_template_from_str(const std::string & name) {
    return LLM_CHAT_TEMPLATES.at(name);
}

llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
    try {
        return llm_chat_template_from_str(tmpl);
    } catch (const std::out_of_range &) {
        // ignore
    }

    auto tmpl_contains = [&tmpl](const char * haystack) -> bool {
        return tmpl.find(haystack) != std::string::npos;
    };

    if (tmpl_contains("<|im_start|>")) {
        return tmpl_contains("<|im_sep|>")
            ? LLM_CHAT_TEMPLATE_PHI_4
            : LLM_CHAT_TEMPLATE_CHATML;
    } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) {
        if (tmpl_contains("[SYSTEM_PROMPT]")) {
            return LLM_CHAT_TEMPLATE_MISTRAL_V7;
        } else if (
            // catches official 'v1' template
            tmpl_contains("' [INST] ' + system_message")
            // catches official 'v3' and 'v3-tekken' templates
            || tmpl_contains("[AVAILABLE_TOOLS]")
        ) {
            // Official mistral 'v1', 'v3' and 'v3-tekken' templates
            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
            if (tmpl_contains(" [INST]")) {
                return LLM_CHAT_TEMPLATE_MISTRAL_V1;
            } else if (tmpl_contains("\"[INST]\"")) {
                return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN;
            }
            return LLM_CHAT_TEMPLATE_MISTRAL_V3;
        } else {
            // llama2 template and its variants
            // [variant] support system message
            // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
            bool support_system_message = tmpl_contains("<<SYS>>");
            bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]");
            bool strip_message          = tmpl_contains("content.strip()");
            if (strip_message) {
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
            } else if (add_bos_inside_history) {
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
            } else if (support_system_message) {
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS;
            } else {
                return LLM_CHAT_TEMPLATE_LLAMA_2;
            }
        }
    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
        return LLM_CHAT_TEMPLATE_PHI_3;
    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
        return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
    } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
        return LLM_CHAT_TEMPLATE_ZEPHYR;
    } else if (tmpl_contains("bos_token + message['role']")) {
        return LLM_CHAT_TEMPLATE_MONARCH;
    } else if (tmpl_contains("<start_of_turn>")) {
        return LLM_CHAT_TEMPLATE_GEMMA;
    } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) {
        // OrionStarAI/Orion-14B-Chat
        return LLM_CHAT_TEMPLATE_ORION;
    } else if (tmpl_contains("GPT4 Correct ")) {
        // openchat/openchat-3.5-0106
        return LLM_CHAT_TEMPLATE_OPENCHAT;
    } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) {
        // eachadea/vicuna-13b-1.1 (and Orca variant)
        if (tmpl_contains("SYSTEM: ")) {
            return LLM_CHAT_TEMPLATE_VICUNA_ORCA;
        }
        return LLM_CHAT_TEMPLATE_VICUNA;
    } else if (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>")) {
        // deepseek-ai/deepseek-coder-33b-instruct
        return LLM_CHAT_TEMPLATE_DEEPSEEK;
    } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) {
        // CohereForAI/c4ai-command-r-plus
        return LLM_CHAT_TEMPLATE_COMMAND_R;
    } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) {
        return LLM_CHAT_TEMPLATE_LLAMA_3;
    } else if (tmpl_contains("[gMASK]sop")) {
        // chatglm3-6b
        return LLM_CHAT_TEMPLATE_CHATGML_3;
    } else if (tmpl_contains("[gMASK]<sop>")) {
        return LLM_CHAT_TEMPLATE_CHATGML_4;
    } else if (tmpl_contains(LU8("<用户>"))) {
        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
        return LLM_CHAT_TEMPLATE_MINICPM;
    } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
        return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
    } else if (tmpl_contains(LU8("<|Assistant|>")) && tmpl_contains(LU8("<|User|>")) && tmpl_contains(LU8("<|end▁of▁sentence|>"))) {
        return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
    } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
        // EXAONE-3.0-7.8B-Instruct
        return LLM_CHAT_TEMPLATE_EXAONE_3;
    } else if (tmpl_contains("rwkv-world")) {
        return LLM_CHAT_TEMPLATE_RWKV_WORLD;
    } else if (tmpl_contains("<|start_of_role|>")) {
        return LLM_CHAT_TEMPLATE_GRANITE;
    } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) {
        return LLM_CHAT_TEMPLATE_GIGACHAT;
    } else if (tmpl_contains("<|role_start|>")) {
        return LLM_CHAT_TEMPLATE_MEGREZ;
    }
    return LLM_CHAT_TEMPLATE_UNKNOWN;
}
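
// Example (illustrative, not part of the file): a GGUF-embedded Jinja template
// that contains "<|im_start|>" but no "<|im_sep|>" is classified as ChatML:
//
//   llm_chat_template t = llm_chat_detect_template(
//       "{% for m in messages %}<|im_start|>{{ m['role'] }}\n"
//       "{{ m['content'] }}<|im_end|>\n{% endfor %}");
//   // t == LLM_CHAT_TEMPLATE_CHATML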

// Simple version of "llama_apply_chat_template" that only works with strings
// This function uses heuristic checks to determine commonly used templates. It is not a jinja parser.
int32_t llm_chat_apply_template(
    llm_chat_template tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass) {
    // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
    std::stringstream ss;
    if (tmpl == LLM_CHAT_TEMPLATE_CHATML) {
        // chatml template
        for (auto message : chat) {
            ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
        }
        if (add_ass) {
            ss << "<|im_start|>assistant\n";
        }
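        // e.g. a single user message "Hello" with add_ass == true renders as:
        //   "<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n"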
    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) {
        // Official mistral 'v7' template
        // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7
        for (auto message : chat) {
            std::string role(message->role);
            std::string content(message->content);
            if (role == "system") {
                ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]";
            } else if (role == "user") {
                ss << "[INST] " << content << "[/INST]";
            } else {
                ss << " " << content << "</s>";
            }
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1
            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3
            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) {
        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
        std::string leading_space  = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1        ? " " : "";
        std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? ""  : " ";
        bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3;
        bool is_inside_turn = false;
        for (auto message : chat) {
            if (!is_inside_turn) {
                ss << leading_space << "[INST]" << trailing_space;
                is_inside_turn = true;
            }
            std::string role(message->role);
            std::string content(message->content);
            if (role == "system") {
                ss << content << "\n\n";
            } else if (role == "user") {
                ss << content << leading_space << "[/INST]";
            } else {
                ss << trailing_space << (trim_assistant_message ? trim(content) : content) << "</s>";
                is_inside_turn = false;
            }
        }
    } else if (
            tmpl == LLM_CHAT_TEMPLATE_LLAMA_2
            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS
            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS
            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) {
        // llama2 template and its variants
        // [variant] support system message
        // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
        bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2;
        // [variant] add BOS inside history
        bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
        // [variant] trim spaces from the input message
        bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
        // construct the prompt
        bool is_inside_turn = true; // skip BOS at the beginning
        ss << "[INST] ";
        for (auto message : chat) {
            std::string content = strip_message ? trim(message->content) : message->content;
            std::string role(message->role);
            if (!is_inside_turn) {
                is_inside_turn = true;
                ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
            }
            if (role == "system") {
                if (support_system_message) {
                    ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
                } else {
                    // if the model does not support system message, we still include it in the first message, but without <<SYS>>
                    ss << content << "\n";
                }
            } else if (role == "user") {
                ss << content << " [/INST]";
            } else {
                ss << content << "</s>";
                is_inside_turn = false;
            }
        }
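        // e.g. for the "llama2-sys" variant, [system "S", user "U", assistant "A"]
        // renders as: "[INST] <<SYS>>\nS\n<</SYS>>\n\nU [/INST]A</s>"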
    } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) {
        // Phi 3
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>\n" << message->content << "<|end|>\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_4) {
        // Phi 4 (ChatML-style, but with <|im_sep|> between role and content)
        for (auto message : chat) {
            ss << "<|im_start|>" << message->role << "<|im_sep|>" << message->content << "<|im_end|>";
        }
        if (add_ass) {
            ss << "<|im_start|>assistant<|im_sep|>";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) {
        // Falcon 3
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>\n" << message->content << "\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) {
        // zephyr template
        for (auto message : chat) {
            ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) {
        // mlabonne/AlphaMonarch-7B template (the <s> is included inside history)
        for (auto message : chat) {
            std::string bos = (message == chat.front()) ? "" : "<s>"; // skip BOS for first message
            ss << bos << message->role << "\n" << message->content << "</s>\n";
        }
        if (add_ass) {
            ss << "<s>assistant\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) {
        // google/gemma-7b-it
        std::string system_prompt = "";
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
                system_prompt = trim(message->content);
                continue;
            }
            // in gemma, "assistant" is "model"
            role = role == "assistant" ? "model" : message->role;
            ss << "<start_of_turn>" << role << "\n";
            if (!system_prompt.empty() && role != "model") {
                ss << system_prompt << "\n\n";
                system_prompt = "";
            }
            ss << trim(message->content) << "<end_of_turn>\n";
        }
        if (add_ass) {
            ss << "<start_of_turn>model\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_ORION) {
        // OrionStarAI/Orion-14B-Chat
        std::string system_prompt = "";
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // there is no system message support, we will merge it with user prompt
                system_prompt = message->content;
                continue;
            } else if (role == "user") {
                ss << "Human: ";
                if (!system_prompt.empty()) {
                    ss << system_prompt << "\n\n";
                    system_prompt = "";
                }
                ss << message->content << "\n\nAssistant: </s>";
            } else {
                ss << message->content << "</s>";
            }
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) {
        // openchat/openchat-3.5-0106
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content << "<|end_of_turn|>";
            } else {
                // cast to unsigned char: passing a negative char to toupper() is undefined behavior
                role[0] = toupper(static_cast<unsigned char>(role[0]));
                ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
            }
        }
        if (add_ass) {
            ss << "GPT4 Correct Assistant:";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
        // eachadea/vicuna-13b-1.1 (and Orca variant)
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // Orca-Vicuna variant uses a system prefix
                if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
                    ss << "SYSTEM: " << message->content << "\n";
                } else {
                    ss << message->content << "\n\n";
                }
            } else if (role == "user") {
                ss << "USER: " << message->content << "\n";
            } else if (role == "assistant") {
                ss << "ASSISTANT: " << message->content << "</s>\n";
            }
        }
        if (add_ass) {
            ss << "ASSISTANT:";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) {
        // deepseek-ai/deepseek-coder-33b-instruct
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content;
            } else if (role == "user") {
                ss << "### Instruction:\n" << message->content << "\n";
            } else if (role == "assistant") {
                ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
            }
        }
        if (add_ass) {
            ss << "### Response:\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) {
        // CohereForAI/c4ai-command-r-plus
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            } else if (role == "user") {
                ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            } else if (role == "assistant") {
                ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            }
        }
        if (add_ass) {
            ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) {
        // Llama 3
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
        }
        if (add_ass) {
            ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) {
        // chatglm3-6b
        ss << "[gMASK]" << "sop";
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>" << "\n " << message->content;
        }
        if (add_ass) {
            ss << "<|assistant|>";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) {
        ss << "[gMASK]" << "<sop>";
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>" << "\n" << message->content;
        }
        if (add_ass) {
            ss << "<|assistant|>";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>" << "\n" << message->content;
        }
        if (add_ass) {
            ss << "<|assistant|>";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "user") {
                ss << LU8("<用户>");
                ss << trim(message->content);
                ss << "<AI>";
            } else {
                ss << trim(message->content);
            }
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) {
        // DeepSeek-V2
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content << "\n\n";
            } else if (role == "user") {
                ss << "User: " << message->content << "\n\n";
            } else if (role == "assistant") {
                ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>");
            }
        }
        if (add_ass) {
            ss << "Assistant:";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_3) {
        // DeepSeek-V3
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content << "\n\n";
            } else if (role == "user") {
                ss << LU8("<|User|>") << message->content;
            } else if (role == "assistant") {
                ss << LU8("<|Assistant|>") << message->content << LU8("<|end▁of▁sentence|>");
            }
        }
        if (add_ass) {
            ss << LU8("<|Assistant|>");
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) {
        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
        // EXAONE-3.0-7.8B-Instruct
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
            } else if (role == "user") {
                ss << "[|user|]" << trim(message->content) << "\n";
            } else if (role == "assistant") {
                ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
            }
        }
        if (add_ass) {
            ss << "[|assistant|]";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) {
        // this template requires the model to have "\n\n" as EOT token
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "user") {
                ss << "User: " << message->content << "\n\nAssistant:";
            } else {
                ss << message->content << "\n\n";
            }
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) {
        // IBM Granite template
        for (const auto & message : chat) {
            std::string role(message->role);
            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
            if (role == "assistant_tool_call") {
                ss << "<|tool_call|>";
            }
            ss << message->content << "<|end_of_text|>\n";
        }
        if (add_ass) {
            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) {
        // GigaChat template
        bool has_system = !chat.empty() && std::string(chat[0]->role) == "system";

        // Handle system message if present
        if (has_system) {
            ss << "<s>" << chat[0]->content << "<|message_sep|>";
        } else {
            ss << "<s>";
        }

        // Process remaining messages
        for (size_t i = has_system ? 1 : 0; i < chat.size(); i++) {
            std::string role(chat[i]->role);
            if (role == "user") {
                ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>"
                   << "available functions<|role_sep|>[]<|message_sep|>";
            } else if (role == "assistant") {
                ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>";
            }
        }

        // Add generation prompt if needed
        if (add_ass) {
            ss << "assistant<|role_sep|>";
        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) {
        // Megrez template
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>";
        }
        if (add_ass) {
            ss << "<|role_start|>assistant<|role_end|>";
        }
    } else {
        // template not supported
        return -1;
    }
    dest = ss.str();
    return dest.size();
}
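
// Example usage (illustrative, not part of the file). The llama_chat_message
// struct comes from llama.h and holds `role` and `content` C strings:
//
//   llama_chat_message msgs[] = {
//       { "system", "You are a helpful assistant." },
//       { "user",   "Hello!" },
//   };
//   std::vector<const llama_chat_message *> chat = { &msgs[0], &msgs[1] };
//   std::string prompt;
//   int32_t res = llm_chat_apply_template(LLM_CHAT_TEMPLATE_CHATML, chat, prompt, true);
//   // res is negative for unsupported templates, otherwise it equals prompt.size()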

// public interface

int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
    auto it = LLM_CHAT_TEMPLATES.begin();
    for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
        output[i] = it->first.c_str();
        std::advance(it, 1);
    }
    return (int32_t) LLM_CHAT_TEMPLATES.size();
}
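
// Example usage (illustrative): the return value is the total number of
// built-in templates, so a first call with len == 0 can size the buffer:
//
//   int32_t n = llama_chat_builtin_templates(nullptr, 0);
//   std::vector<const char *> names(n);
//   llama_chat_builtin_templates(names.data(), names.size());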