@@ -762,12 +762,6 @@ bool common_arg_utils::is_autoy(const std::string & value) {
 }
 
 common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
-    // default values specific to example
-    // note: we place it here instead of inside server.cpp to allow llama-gen-docs to pick it up
-    if (ex == LLAMA_EXAMPLE_SERVER) {
-        params.use_jinja = true;
-    }
-
     params.use_color = tty_can_use_colors();
 
     // load dynamic backends
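With the server-specific branch gone, `use_jinja` presumably falls back to whatever default `common_params` itself declares, so the parser, the server, and llama-gen-docs all read a single value. A minimal sketch of that idea, using a hypothetical stand-in struct rather than the real `common_params`:

```cpp
#include <cstdio>

// Hypothetical stand-in for common_params (assumption: after this change
// the default lives on the struct, not in a per-example branch).
struct params_sketch {
    bool use_jinja = true; // one default shared by every example and by doc generation
};

int main() {
    params_sketch params; // no `if (ex == LLAMA_EXAMPLE_SERVER)` override needed
    std::printf("use_jinja default: %s\n", params.use_jinja ? "enabled" : "disabled");
    return 0;
}
```

The help strings in the next hunk are rendered from this same `params` instance, which is why the default has to be in place before the options are registered.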
@@ -2623,14 +2617,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_MODELS_AUTOLOAD"));
     add_opt(common_arg(
         {"--jinja"},
-        string_format("use jinja template for chat (default: %s)\n", params.use_jinja ? "enabled" : "disabled"),
+        string_format("use jinja template for chat (default: %s)", params.use_jinja ? "enabled" : "disabled"),
         [](common_params & params) {
             params.use_jinja = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_COMPLETION, LLAMA_EXAMPLE_CLI, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_JINJA"));
     add_opt(common_arg(
         {"--no-jinja"},
-        string_format("disable jinja template for chat (default: %s)\n", params.use_jinja ? "enabled" : "disabled"),
+        string_format("disable jinja template for chat (default: %s)", params.use_jinja ? "disabled" : "enabled"),
         [](common_params & params) {
             params.use_jinja = false;
         }
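For readers new to this pattern: each `common_arg` bundles the flag names, a help string rendered from the current defaults, and a handler that mutates `params`, with `set_examples`/`set_env` chained on afterwards. Below is a compilable, heavily simplified sketch of that shape; the types, the `LLAMA_ARG_NO_JINJA` env name, and the no-op chaining methods are illustrative assumptions, not the real definitions from common/arg.cpp:

```cpp
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

struct params_t { bool use_jinja = true; };

// Hypothetical miniature of the common_arg builder: flag names, help text,
// and a handler lambda. set_examples/set_env mirror the real chaining but
// do nothing in this sketch.
struct arg_sketch {
    std::vector<std::string>        flags;
    std::string                     help;
    std::function<void(params_t &)> handler;

    arg_sketch & set_examples(std::vector<std::string>) { return *this; }
    arg_sketch & set_env(std::string)                   { return *this; }
};

int main() {
    params_t params;
    std::vector<arg_sketch> opts;

    // Help text is built from `params` at registration time, so the default
    // must already be set when the option is added (matching the diff above).
    opts.push_back(arg_sketch{
        {"--no-jinja"},
        std::string("disable jinja template for chat (default: ") +
            (params.use_jinja ? "disabled" : "enabled") + ")",
        [](params_t & p) { p.use_jinja = false; }
    }.set_examples({"server"}).set_env("LLAMA_ARG_NO_JINJA")); // env name is hypothetical

    opts.back().handler(params); // simulate the user passing --no-jinja
    std::printf("%s -> use_jinja=%d\n", opts.back().help.c_str(), (int) params.use_jinja);
    return 0;
}
```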