- #include "arg.h"
- #include "log.h"
- #include "common.h"
- #include "sampling.h"
- #include "llama.h"
- #include "ggml.h"
- #include "console.h"
- #include "chat.h"
- #include "mtmd.h"
- #include <algorithm>
- #include <string>
- #include <vector>
- #include <limits.h>
- #include <cinttypes>
- #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
- #include <signal.h>
- #include <unistd.h>
- #elif defined (_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- #define NOMINMAX
- #endif
- #include <windows.h>
- #include <signal.h>
- #endif
- // volatile, because these flags are modified from a signal handler (an interrupt)
- static volatile bool g_is_generating = false;
- static volatile bool g_is_interrupted = false;
- /**
- * Please note that this is NOT production-ready code.
- * It is a playground for trying multimodal support in llama.cpp.
- * For contributors: please keep this code simple and easy to understand.
- */
- static void show_additional_info(int /*argc*/, char ** argv) {
- LOG(
- "Experimental CLI for multimodal\n\n"
- "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> -p <prompt>\n\n"
- " -m and --mmproj are required\n"
- " -hf user/repo can replace both -m and --mmproj in most cases\n"
- " --image and -p are optional, if NOT provided, the CLI will run in chat mode\n"
- " to disable using GPU for mmproj model, add --no-mmproj-offload\n",
- argv[0]
- );
- }
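- // Example invocation (illustrative only; the binary name and file names below
- // are placeholders, not guaranteed defaults):
- //   <binary> -m model.gguf --mmproj mmproj.gguf --image input.jpg -p "describe this image"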
- #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
- static void sigint_handler(int signo) {
- if (signo == SIGINT) {
- if (g_is_generating) {
- g_is_generating = false;
- } else {
- console::cleanup();
- if (g_is_interrupted) {
- _exit(1);
- }
- g_is_interrupted = true;
- }
- }
- }
- #endif
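- // Aggregates all state shared across the CLI: the language model and context,
- // the vision (mmproj) context, queued image bitmaps, chat templates, and the
- // current KV-cache position (n_past).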
- struct mtmd_cli_context {
- mtmd_context_ptr ctx_vision;
- common_init_result llama_init;
- llama_model * model;
- llama_context * lctx;
- const llama_vocab * vocab;
- llama_batch batch;
- int n_batch;
- std::vector<mtmd_bitmap> bitmaps;
- // note: we know that the gemma3 template is "linear", meaning each turn is completely separated from the others,
- // so here we don't need to keep track of chat history
- common_chat_templates_ptr tmpls;
- // support for legacy templates (for models without an EOT token)
- llama_tokens antiprompt_tokens;
- int n_threads = 1;
- llama_pos n_past = 0;
- mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
- model = llama_init.model.get();
- lctx = llama_init.context.get();
- vocab = llama_model_get_vocab(model);
- n_threads = params.cpuparams.n_threads;
- batch = llama_batch_init(params.n_batch, 0, 1);
- n_batch = params.n_batch;
- if (!llama_model_chat_template(model, nullptr) && params.chat_template.empty()) {
- LOG_ERR("Model does not have chat template.\n");
- LOG_ERR(" For old llava models, you may need to use '--chat-template vicuna'\n");
- LOG_ERR(" For MobileVLM models, use '--chat-template deepseek'\n");
- LOG_ERR(" For Mistral Small 3.1, use '--chat-template mistral-v7'\n");
- exit(1);
- }
- tmpls = common_chat_templates_init(model, params.chat_template);
- LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(tmpls.get(), params.use_jinja).c_str());
- init_vision_context(params);
- // load antiprompt tokens for legacy templates
- if (params.chat_template == "vicuna") {
- antiprompt_tokens = common_tokenize(lctx, "ASSISTANT:", false, true);
- } else if (params.chat_template == "deepseek") {
- antiprompt_tokens = common_tokenize(lctx, "###", false, true);
- }
- }
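- // Load the multimodal projector (mmproj) and create the vision context used
- // to encode images into embeddings for the language model.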
- void init_vision_context(common_params & params) {
- const char * clip_path = params.mmproj.path.c_str();
- ctx_vision.reset(mtmd_init_from_file(clip_path, model, mtmd_context_params{
- /* use_gpu */ params.mmproj_use_gpu,
- /* timings */ true,
- /* n_threads */ params.cpuparams.n_threads,
- /* verbosity */ params.verbosity > 0 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_INFO,
- }));
- if (!ctx_vision.get()) {
- LOG_ERR("Failed to load vision model from %s\n", clip_path);
- exit(1);
- }
- }
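- // Returns true if the generated tokens end with the antiprompt sequence.
- // Only used for legacy templates whose models lack an EOT token.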
- bool check_antiprompt(const llama_tokens & generated_tokens) {
- if (antiprompt_tokens.empty() || generated_tokens.size() < antiprompt_tokens.size()) {
- return false;
- }
- return std::equal(
- generated_tokens.end() - antiprompt_tokens.size(),
- generated_tokens.end(),
- antiprompt_tokens.begin()
- );
- }
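- // Decode an image file into a bitmap and queue it; queued bitmaps are
- // consumed (and cleared) by the next eval_message() call.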
- bool load_image(const std::string & fname) {
- mtmd_bitmap bitmap;
- if (mtmd_helper_bitmap_init_from_file(fname.c_str(), bitmap)) {
- return false;
- }
- bitmaps.push_back(std::move(bitmap));
- return true;
- }
- };
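- // Autoregressive generation loop: sample a token, print it, then decode it so
- // the next sampling step can see it. Stops on EOG, the antiprompt, Ctrl+C, or
- // after n_predict tokens.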
- static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int n_predict) {
- llama_tokens generated_tokens;
- for (int i = 0; i < n_predict; i++) {
- if (!g_is_generating || g_is_interrupted) { // note: the loop bound already guarantees i < n_predict
- LOG("\n");
- break;
- }
- llama_token token_id = common_sampler_sample(smpl, ctx.lctx, -1);
- generated_tokens.push_back(token_id);
- common_sampler_accept(smpl, token_id, true);
- if (llama_vocab_is_eog(ctx.vocab, token_id) || ctx.check_antiprompt(generated_tokens)) {
- LOG("\n");
- break; // end of generation
- }
- LOG("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
- fflush(stdout);
- if (g_is_interrupted) {
- LOG("\n");
- break;
- }
- // eval the token
- common_batch_clear(ctx.batch);
- common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
- if (llama_decode(ctx.lctx, ctx.batch)) {
- LOG_ERR("failed to decode token\n");
- return 1;
- }
- }
- return 0;
- }
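- // Format the message with the chat template, tokenize it into text/image
- // chunks via libmtmd, then evaluate the chunks, advancing n_past accordingly.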
- static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, bool add_bos = false) {
- common_chat_templates_inputs tmpl_inputs;
- tmpl_inputs.messages = {msg};
- tmpl_inputs.add_generation_prompt = true;
- tmpl_inputs.use_jinja = false; // jinja is buggy here
- auto formatted_chat = common_chat_templates_apply(ctx.tmpls.get(), tmpl_inputs);
- LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str());
- mtmd_input_text text;
- text.text = formatted_chat.prompt;
- text.add_special = add_bos;
- text.parse_special = true;
- mtmd_input_chunks chunks;
- if (g_is_interrupted) return 0;
- int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, ctx.bitmaps);
- if (res != 0) {
- LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
- return 1;
- }
- ctx.bitmaps.clear();
- if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks, ctx.n_past, 0, ctx.n_batch)) {
- LOG_ERR("Unable to eval prompt\n");
- return 1;
- }
- ctx.n_past += mtmd_helper_get_n_pos(chunks);
- LOG("\n");
- return 0;
- }
- int main(int argc, char ** argv) {
- ggml_time_init();
- common_params params;
- params.sampling.temp = 0.2; // lower temp by default for better quality
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
- return 1;
- }
- common_init();
- if (params.mmproj.path.empty()) {
- show_additional_info(argc, argv);
- LOG_ERR("ERR: Missing --mmproj argument\n");
- return 1;
- }
- mtmd_cli_context ctx(params);
- LOG("%s: loading model: %s\n", __func__, params.model.path.c_str());
- bool is_single_turn = !params.prompt.empty() && !params.image.empty();
- struct common_sampler * smpl = common_sampler_init(ctx.model, params.sampling);
- int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;
- // ctrl+C handling
- {
- #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
- struct sigaction sigint_action;
- sigint_action.sa_handler = sigint_handler;
- sigemptyset (&sigint_action.sa_mask);
- sigint_action.sa_flags = 0;
- sigaction(SIGINT, &sigint_action, NULL);
- #elif defined (_WIN32)
- auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
- return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
- };
- SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
- #endif
- }
- if (g_is_interrupted) return 130;
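- // Single-turn mode: one prompt plus image(s), then a single response.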
- if (is_single_turn) {
- g_is_generating = true;
- if (params.prompt.find("<__image__>") == std::string::npos) {
- params.prompt += " <__image__>";
- }
- common_chat_msg msg;
- msg.role = "user";
- msg.content = params.prompt;
- for (const auto & image : params.image) {
- if (!ctx.load_image(image)) {
- return 1; // error is already printed by libmtmd
- }
- }
- if (eval_message(ctx, msg, true)) {
- return 1;
- }
- if (!g_is_interrupted && generate_response(ctx, smpl, n_predict)) {
- return 1;
- }
- } else {
- LOG("\n Running in chat mode, available commands:");
- LOG("\n /image <path> load an image");
- LOG("\n /clear clear the chat history");
- LOG("\n /quit or /exit exit the program");
- LOG("\n");
- bool is_first_msg = true;
- std::string content;
- while (!g_is_interrupted) {
- g_is_generating = false;
- LOG("\n> ");
- console::set_display(console::user_input);
- std::string line;
- console::readline(line, false);
- if (g_is_interrupted) break;
- console::set_display(console::reset);
- line = string_strip(line);
- if (line.empty()) {
- continue;
- }
- if (line == "/quit" || line == "/exit") {
- break;
- }
- if (line == "/clear") {
- ctx.n_past = 1; // the BOS token stays in the cache at position 0
- llama_kv_self_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS, drop everything after it
- LOG("Chat history cleared\n\n");
- continue;
- }
- g_is_generating = true;
- if (line == "/image" || line.find("/image ") == 0) {
- if (line.size() < 8) {
- LOG_ERR("ERR: Missing image filename\n");
- continue;
- }
- std::string image = line.substr(7);
- if (ctx.load_image(image)) {
- LOG("Image %s loaded\n", image.c_str());
- content += "<__image__>";
- }
- // else, error is already printed by libmtmd
- continue;
- } else {
- content += line;
- }
- common_chat_msg msg;
- msg.role = "user";
- msg.content = content;
- int ret = eval_message(ctx, msg, is_first_msg);
- if (ret) {
- return 1;
- }
- if (g_is_interrupted) break;
- if (generate_response(ctx, smpl, n_predict)) {
- return 1;
- }
- content.clear();
- is_first_msg = false;
- }
- }
- if (g_is_interrupted) LOG("\nInterrupted by user\n");
- LOG("\n\n");
- llama_perf_context_print(ctx.lctx);
- return g_is_interrupted ? 130 : 0;
- }