mtmd-cli.cpp

#include "arg.h"
#include "log.h"
#include "common.h"
#include "sampling.h"
#include "llama.h"
#include "ggml.h"
#include "console.h"
#include "chat.h"
#include "mtmd.h"
#include "mtmd-helper.h"

#include <vector>
#include <limits.h>
#include <cinttypes>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

// volatile, because of signal being an interrupt
static volatile bool g_is_generating = false;
static volatile bool g_is_interrupted = false;

/**
 * Please note that this is NOT production-ready code.
 * It is a playground for trying out multimodal support in llama.cpp.
 * For contributors: please keep this code simple and easy to understand.
 */

static void show_additional_info(int /*argc*/, char ** argv) {
    LOG(
        "Experimental CLI for multimodal\n\n"
        "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> --audio <audio> -p <prompt>\n\n"
        "  -m and --mmproj are required\n"
        "  -hf user/repo can replace both -m and --mmproj in most cases\n"
        "  --image, --audio and -p are optional; if NOT provided, the CLI will run in chat mode\n"
        "  to disable using GPU for mmproj model, add --no-mmproj-offload\n",
        argv[0]
    );
}

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (g_is_generating) {
            // Ctrl+C during generation: just stop generating
            g_is_generating = false;
        } else {
            console::cleanup();
            if (g_is_interrupted) {
                // second Ctrl+C: force exit
                _exit(1);
            }
            g_is_interrupted = true;
        }
    }
}
#endif
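
// all the state shared between prompt evaluation and token generation is kept in this struct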
struct mtmd_cli_context {
    mtmd::context_ptr ctx_vision;
    common_init_result llama_init;

    llama_model       * model;
    llama_context     * lctx;
    const llama_vocab * vocab;
    common_sampler    * smpl;
    llama_batch         batch;
    int                 n_batch;

    mtmd::bitmaps bitmaps;

    // note: we know that the gemma3 template is "linear", meaning each turn is completely separated from the others,
    // so here we don't need to keep track of the chat history
    common_chat_templates_ptr tmpls;

    // support for legacy templates (models not having an EOT token)
    llama_tokens antiprompt_tokens;

    int n_threads    = 1;
    llama_pos n_past = 0;

    mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
        model     = llama_init.model.get();
        lctx      = llama_init.context.get();
        vocab     = llama_model_get_vocab(model);
        smpl      = common_sampler_init(model, params.sampling);
        n_threads = params.cpuparams.n_threads;
        batch     = llama_batch_init(1, 0, 1); // batch for next-token generation
        n_batch   = params.n_batch;

        if (!model || !lctx) {
            exit(1);
        }

        if (!llama_model_chat_template(model, nullptr) && params.chat_template.empty()) {
            LOG_ERR("Model does not have chat template.\n");
            LOG_ERR("  For old llava models, you may need to use '--chat-template vicuna'\n");
            LOG_ERR("  For MobileVLM models, use '--chat-template deepseek'\n");
            LOG_ERR("  For Mistral Small 3.1, use '--chat-template mistral-v7'\n");
            exit(1);
        }

        tmpls = common_chat_templates_init(model, params.chat_template);
        LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(tmpls.get(), params.use_jinja).c_str());

        init_vision_context(params);

        // load antiprompt tokens for legacy templates
        if (params.chat_template == "vicuna") {
            antiprompt_tokens = common_tokenize(lctx, "ASSISTANT:", false, true);
        } else if (params.chat_template == "deepseek") {
            antiprompt_tokens = common_tokenize(lctx, "###", false, true);
        }
    }

    ~mtmd_cli_context() {
        llama_batch_free(batch);
        common_sampler_free(smpl);
    }

    void init_vision_context(common_params & params) {
        const char * clip_path = params.mmproj.path.c_str();
        mtmd_context_params mparams = mtmd_context_params_default();
        mparams.use_gpu       = params.mmproj_use_gpu;
        mparams.print_timings = true;
        mparams.n_threads     = params.cpuparams.n_threads;
        mparams.verbosity     = params.verbosity > 0 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_INFO;
        ctx_vision.reset(mtmd_init_from_file(clip_path, model, mparams));
        if (!ctx_vision.get()) {
            LOG_ERR("Failed to load vision model from %s\n", clip_path);
            exit(1);
        }
    }
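
    // check whether the generated tokens end with the antiprompt sequence of a legacy template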
    bool check_antiprompt(const llama_tokens & generated_tokens) {
        if (antiprompt_tokens.empty() || generated_tokens.size() < antiprompt_tokens.size()) {
            return false;
        }
        return std::equal(
            generated_tokens.end() - antiprompt_tokens.size(),
            generated_tokens.end(),
            antiprompt_tokens.begin()
        );
    }
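
    // decode an image or audio file into a bitmap and queue it for the next call to eval_message()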
    bool load_media(const std::string & fname) {
        mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_file(ctx_vision.get(), fname.c_str()));
        if (!bmp.ptr) {
            return false;
        }
        bitmaps.entries.push_back(std::move(bmp));
        return true;
    }
};
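
// sample tokens one at a time until EOG, an antiprompt match, n_predict tokens, or an interruption,
// printing each piece as it is generated and feeding it back through llama_decode()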
static int generate_response(mtmd_cli_context & ctx, int n_predict) {
    llama_tokens generated_tokens;
    for (int i = 0; i < n_predict; i++) {
        if (i > n_predict || !g_is_generating || g_is_interrupted) {
            LOG("\n");
            break;
        }

        llama_token token_id = common_sampler_sample(ctx.smpl, ctx.lctx, -1);
        generated_tokens.push_back(token_id);
        common_sampler_accept(ctx.smpl, token_id, true);

        if (llama_vocab_is_eog(ctx.vocab, token_id) || ctx.check_antiprompt(generated_tokens)) {
            LOG("\n");
            break; // end of generation
        }

        LOG("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
        fflush(stdout);

        if (g_is_interrupted) {
            LOG("\n");
            break;
        }

        // eval the token
        common_batch_clear(ctx.batch);
        common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
        if (llama_decode(ctx.lctx, ctx.batch)) {
            LOG_ERR("failed to decode token\n");
            return 1;
        }
    }
    return 0;
}
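
// apply the chat template to a single user message, tokenize the formatted prompt together with any
// queued media into mtmd chunks, then evaluate the chunks and advance n_past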
static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, bool add_bos = false) {
    common_chat_templates_inputs tmpl_inputs;
    tmpl_inputs.messages = {msg};
    tmpl_inputs.add_generation_prompt = true;
    tmpl_inputs.use_jinja = false; // jinja is buggy here
    auto formatted_chat = common_chat_templates_apply(ctx.tmpls.get(), tmpl_inputs);
    LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str());

    mtmd_input_text text;
    text.text          = formatted_chat.prompt.c_str();
    text.add_special   = add_bos;
    text.parse_special = true;

    if (g_is_interrupted) return 0;

    mtmd::input_chunks chunks(mtmd_input_chunks_init());
    auto bitmaps_c_ptr = ctx.bitmaps.c_ptr();
    int32_t res = mtmd_tokenize(ctx.ctx_vision.get(),
                                chunks.ptr.get(), // output
                                &text,            // text
                                bitmaps_c_ptr.data(),
                                bitmaps_c_ptr.size());
    if (res != 0) {
        LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
        return 1;
    }

    ctx.bitmaps.entries.clear();

    llama_pos new_n_past;
    if (mtmd_helper_eval_chunks(ctx.ctx_vision.get(),
                                ctx.lctx,         // lctx
                                chunks.ptr.get(), // chunks
                                ctx.n_past,       // n_past
                                0,                // seq_id
                                ctx.n_batch,      // n_batch
                                true,             // logits_last
                                &new_n_past)) {
        LOG_ERR("Unable to eval prompt\n");
        return 1;
    }

    ctx.n_past = new_n_past;

    LOG("\n");
    return 0;
}
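
// entry point: parse arguments, build the shared context, then run either a single-turn evaluation
// (prompt and media given on the command line) or the interactive chat loop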
int main(int argc, char ** argv) {
    ggml_time_init();

    common_params params;
    params.sampling.temp = 0.2; // lower temp by default for better quality

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MTMD, show_additional_info)) {
        return 1;
    }

    common_init();

    if (params.mmproj.path.empty()) {
        show_additional_info(argc, argv);
        LOG_ERR("ERR: Missing --mmproj argument\n");
        return 1;
    }

    mtmd_cli_context ctx(params);
    LOG("%s: loading model: %s\n", __func__, params.model.path.c_str());

    bool is_single_turn = !params.prompt.empty() && !params.image.empty();

    int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;

    // Ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }

    if (g_is_interrupted) return 130;

    if (is_single_turn) {
        g_is_generating = true;
        // if the prompt does not already contain a media marker, append one per image
        if (params.prompt.find(mtmd_default_marker()) == std::string::npos) {
            for (size_t i = 0; i < params.image.size(); i++) {
                params.prompt += mtmd_default_marker();
            }
        }
        common_chat_msg msg;
        msg.role    = "user";
        msg.content = params.prompt;
        for (const auto & image : params.image) {
            if (!ctx.load_media(image)) {
                return 1; // error is already printed by libmtmd
            }
        }
        if (eval_message(ctx, msg, true)) {
            return 1;
        }
        if (!g_is_interrupted && generate_response(ctx, n_predict)) {
            return 1;
        }

    } else {
        LOG("\n Running in chat mode, available commands:");
        if (mtmd_support_vision(ctx.ctx_vision.get())) {
            LOG("\n   /image <path>    load an image");
        }
        if (mtmd_support_audio(ctx.ctx_vision.get())) {
            LOG("\n   /audio <path>    load an audio file");
        }
        LOG("\n   /clear           clear the chat history");
        LOG("\n   /quit or /exit   exit the program");
        LOG("\n");
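
        // `content` accumulates the text of the current user turn; a media marker is appended whenever
        // an /image or /audio command succeeds, so the media position is preserved at tokenization time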
        bool is_first_msg = true;
        std::string content;

        while (!g_is_interrupted) {
            g_is_generating = false;
            LOG("\n> ");
            console::set_display(console::user_input);
            std::string line;
            console::readline(line, false);
            if (g_is_interrupted) break;
            console::set_display(console::reset);
            line = string_strip(line);
            if (line.empty()) {
                continue;
            }
            if (line == "/quit" || line == "/exit") {
                break;
            }
            if (line == "/clear") {
                ctx.n_past = 0;
                llama_memory_seq_rm(llama_get_memory(ctx.lctx), 0, 1, -1); // keep BOS
                LOG("Chat history cleared\n\n");
                continue;
            }
            g_is_generating = true;
            bool is_image = line == "/image" || line.find("/image ") == 0;
            bool is_audio = line == "/audio" || line.find("/audio ") == 0;
            if (is_image || is_audio) {
                if (line.size() < 8) {
                    LOG_ERR("ERR: Missing media filename\n");
                    continue;
                }
                std::string media_path = line.substr(7);
                if (ctx.load_media(media_path)) {
                    LOG("%s %s loaded\n", media_path.c_str(), is_image ? "image" : "audio");
                    content += mtmd_default_marker();
                }
                // else, error is already printed by libmtmd
                continue;
            } else {
                content += line;
            }
            common_chat_msg msg;
            msg.role    = "user";
            msg.content = content;
            int ret = eval_message(ctx, msg, is_first_msg);
            if (ret) {
                return 1;
            }
            if (g_is_interrupted) break;
            if (generate_response(ctx, n_predict)) {
                return 1;
            }
            content.clear();
            is_first_msg = false;
        }
    }

    if (g_is_interrupted) LOG("\nInterrupted by user\n");

    LOG("\n\n");
    llama_perf_context_print(ctx.lctx);

    return g_is_interrupted ? 130 : 0;
}