@@ -21799,8 +21799,11 @@ static int32_t llama_chat_apply_template_internal(
         // IBM Granite template
         for (const auto & message : chat) {
             std::string role(message->role);
-            ss << "<|start_of_role|>" << role << "<|end_of_role|>"
-               << message->content << "<|end_of_text|>\n";
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
+            if (role == "assistant_tool_call") {
+                ss << "<|tool_call|>";
+            }
+            ss << message->content << "<|end_of_text|>\n";
         }
         if (add_ass) {
             ss << "<|start_of_role|>assistant<|end_of_role|>\n";