@@ -36,6 +36,8 @@ struct server_params
     std::string hostname = "127.0.0.1";
     std::vector<std::string> api_keys;
     std::string public_path = "examples/server/public";
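+    // conversation template applied to /v1/chat/completions requests ("chatml" or "llama2")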
+    std::string chat_template = "chatml";
     int32_t port = 8080;
     int32_t read_timeout = 600;
     int32_t write_timeout = 600;
@@ -1859,6 +1861,8 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     printf("                            types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
     printf("  -gan N, --grp-attn-n N    set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`");
     printf("  -gaw N, --grp-attn-w N    set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`");
+    printf("  --chat-template FORMAT_NAME\n");
+    printf("                            set chat template, possible values: llama2, chatml (default: %s)\n", sparams.chat_template.c_str());
     printf("\n");
 }
 
@@ -2290,6 +2294,22 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
             log_set_target(stdout);
             LOG_INFO("logging to file is disabled.", {});
         }
+        else if (arg == "--chat-template")
+        {
+            if (++i >= argc)
+            {
+                invalid_param = true;
+                break;
+            }
+            std::string value(argv[i]);
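+            // only the two built-in formats are supported; reject anything else up front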
+            if (value != "chatml" && value != "llama2") {
+                fprintf(stderr, "error: chat template can be \"llama2\" or \"chatml\", but got: %s\n", value.c_str());
+                invalid_param = true;
+                break;
+            }
+            sparams.chat_template = value;
+        }
         else if (arg == "--override-kv")
         {
             if (++i >= argc) {
@@ -2743,13 +2763,13 @@ int main(int argc, char **argv)
 
 
     // TODO: add mount point without "/v1" prefix -- how?
-    svr.Post("/v1/chat/completions", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
+    svr.Post("/v1/chat/completions", [&llama, &validate_api_key, &sparams](const httplib::Request &req, httplib::Response &res)
             {
                 res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
                 if (!validate_api_key(req, res)) {
                     return;
                 }
-                json data = oaicompat_completion_params_parse(json::parse(req.body));
+                json data = oaicompat_completion_params_parse(json::parse(req.body), sparams.chat_template);
 
                 const int task_id = llama.queue_tasks.get_new_id();
                 llama.queue_results.add_waiting_task_id(task_id);
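
Context, not part of the patch: the two accepted names control how the OpenAI-style
"messages" array is flattened into a single prompt before the completion task is
queued. Below is a minimal, self-contained sketch of a llama2-style formatter; the
names (chat_msg, format_llama2) and the exact token layout are illustrative
assumptions, not the server's actual helpers.

// Sketch only: illustrates the "llama2" conversation layout selected by
// --chat-template. Names (chat_msg, format_llama2) and the exact token
// layout are assumptions for illustration, not the server's real helpers.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct chat_msg { std::string role, content; };

// Flatten OpenAI-style chat messages into the llama2 [INST]/<<SYS>> layout.
static std::string format_llama2(const std::vector<chat_msg> & messages) {
    std::ostringstream out;
    bool inside_turn = false; // true while an [INST] ... [/INST] turn is open
    for (const auto & m : messages) {
        if (!inside_turn) {
            out << "[INST] ";
            inside_turn = true;
        }
        if (m.role == "system") {
            out << "<<SYS>>\n" << m.content << "\n<</SYS>>\n\n";
        } else if (m.role == "user") {
            out << m.content << " [/INST]";
        } else { // an assistant reply closes the current turn
            out << " " << m.content << " </s>";
            inside_turn = false;
        }
    }
    return out.str();
}

int main() {
    const std::vector<chat_msg> messages = {
        {"system", "You are a helpful assistant."},
        {"user",   "Hello!"},
    };
    // With --chat-template llama2, the body of a /v1/chat/completions request
    // would be rendered along these lines before being queued as a task.
    std::cout << format_llama2(messages) << "\n";
}

With the patch applied, the template is chosen per server instance, e.g.
./server -m model.gguf --chat-template llama2; when the flag is omitted,
requests keep the default chatml rendering.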