// mtmd-cli.cpp
#include "arg.h"
#include "log.h"
#include "common.h"
#include "sampling.h"
#include "llama.h"
#include "ggml.h"
#include "console.h"
#include "chat.h"
#include "mtmd.h"
#include "mtmd-helper.h"

#include <vector>
#include <limits.h>
#include <cinttypes>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif
// volatile, because of signal being an interrupt
static volatile bool g_is_generating = false;
static volatile bool g_is_interrupted = false;

/**
 * Please note that this is NOT production-ready code.
 * It is a playground for trying multimodal support in llama.cpp.
 * For contributors: please keep this code simple and easy to understand.
 */
static void show_additional_info(int /*argc*/, char ** argv) {
    LOG(
        "Experimental CLI for multimodal\n\n"
        "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> --audio <audio> -p <prompt>\n\n"
        "  -m and --mmproj are required\n"
        "  -hf user/repo can replace both -m and --mmproj in most cases\n"
        "  --image, --audio and -p are optional, if NOT provided, the CLI will run in chat mode\n"
        "  to disable using GPU for mmproj model, add --no-mmproj-offload\n",
        argv[0]
    );
}
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (g_is_generating) {
            g_is_generating = false;
        } else {
            console::cleanup();
            if (g_is_interrupted) {
                _exit(1);
            }
            g_is_interrupted = true;
        }
    }
}
#endif
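
// Bundles everything one CLI session needs: the llama model/context and vocab,
// the sampler, a single-token decode batch, the mtmd context for vision/audio,
// pending media bitmaps, chat-template state, and the current KV-cache position.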
struct mtmd_cli_context {
    mtmd::context_ptr ctx_vision;
    common_init_result llama_init;

    llama_model * model;
    llama_context * lctx;
    const llama_vocab * vocab;
    common_sampler * smpl;
    llama_batch batch;
    int n_batch;

    mtmd::bitmaps bitmaps;

    // chat template
    common_chat_templates_ptr tmpls;
    std::vector<common_chat_msg> chat_history;
    bool use_jinja = false;

    // TODO: support for --system-prompt with /clear command

    // support for legacy templates (models not having EOT token)
    llama_tokens antiprompt_tokens;

    int n_threads = 1;
    llama_pos n_past = 0;

    mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
        model = llama_init.model.get();
        lctx = llama_init.context.get();
        vocab = llama_model_get_vocab(model);
        smpl = common_sampler_init(model, params.sampling);
        n_threads = params.cpuparams.n_threads;
        batch = llama_batch_init(1, 0, 1); // batch for next token generation
        n_batch = params.n_batch;

        if (!model || !lctx) {
            exit(1);
        }

        if (!llama_model_chat_template(model, nullptr) && params.chat_template.empty()) {
            LOG_ERR("Model does not have chat template.\n");
            LOG_ERR("  For old llava models, you may need to use '--chat-template vicuna'\n");
            LOG_ERR("  For MobileVLM models, use '--chat-template deepseek'\n");
            LOG_ERR("  For Mistral Small 3.1, use '--chat-template mistral-v7'\n");
            exit(1);
        }

        tmpls = common_chat_templates_init(model, params.chat_template);
        use_jinja = params.use_jinja;
        chat_history.clear();
        LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(tmpls.get(), params.use_jinja, params.default_template_kwargs).c_str());

        init_vision_context(params);

        // load antiprompt tokens for legacy templates
        if (params.chat_template == "vicuna") {
            antiprompt_tokens = common_tokenize(lctx, "ASSISTANT:", false, true);
        } else if (params.chat_template == "deepseek") {
            antiprompt_tokens = common_tokenize(lctx, "###", false, true);
        }
    }

    ~mtmd_cli_context() {
        llama_batch_free(batch);
        common_sampler_free(smpl);
    }
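
    // Load the multimodal projector (--mmproj) into an mtmd context; it is used
    // later to tokenize and encode image/audio inputs alongside the text prompt.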
    void init_vision_context(common_params & params) {
        const char * clip_path = params.mmproj.path.c_str();
        mtmd_context_params mparams = mtmd_context_params_default();
        mparams.use_gpu = params.mmproj_use_gpu;
        mparams.print_timings = true;
        mparams.n_threads = params.cpuparams.n_threads;
        mparams.flash_attn_type = params.flash_attn_type;
        mparams.warmup = params.warmup;
        mparams.image_min_tokens = params.image_min_tokens;
        mparams.image_max_tokens = params.image_max_tokens;
        ctx_vision.reset(mtmd_init_from_file(clip_path, model, mparams));
        if (!ctx_vision.get()) {
            LOG_ERR("Failed to load vision model from %s\n", clip_path);
            exit(1);
        }
    }
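
    // For legacy templates without an EOT token: returns true when the tail of
    // the generated tokens matches the antiprompt sequence set in the constructor.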
    bool check_antiprompt(const llama_tokens & generated_tokens) {
        if (antiprompt_tokens.empty() || generated_tokens.size() < antiprompt_tokens.size()) {
            return false;
        }
        return std::equal(
            generated_tokens.end() - antiprompt_tokens.size(),
            generated_tokens.end(),
            antiprompt_tokens.begin()
        );
    }

    bool load_media(const std::string & fname) {
        mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_file(ctx_vision.get(), fname.c_str()));
        if (!bmp.ptr) {
            return false;
        }
        bitmaps.entries.push_back(std::move(bmp));
        return true;
    }
};
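
// Sample up to n_predict tokens one at a time, printing each piece as it is
// generated. Generation stops on an end-of-generation token, an antiprompt
// match, or Ctrl+C; the finished reply is appended to the chat history.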
static int generate_response(mtmd_cli_context & ctx, int n_predict) {
    llama_tokens generated_tokens;
    for (int i = 0; i < n_predict; i++) {
        if (i > n_predict || !g_is_generating || g_is_interrupted) {
            LOG("\n");
            break;
        }

        llama_token token_id = common_sampler_sample(ctx.smpl, ctx.lctx, -1);
        generated_tokens.push_back(token_id);
        common_sampler_accept(ctx.smpl, token_id, true);

        if (llama_vocab_is_eog(ctx.vocab, token_id) || ctx.check_antiprompt(generated_tokens)) {
            LOG("\n");
            break; // end of generation
        }

        LOG("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
        fflush(stdout);

        if (g_is_interrupted) {
            LOG("\n");
            break;
        }

        // eval the token
        common_batch_clear(ctx.batch);
        common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
        if (llama_decode(ctx.lctx, ctx.batch)) {
            LOG_ERR("failed to decode token\n");
            return 1;
        }
    }

    std::string generated_text = common_detokenize(ctx.lctx, generated_tokens);
    common_chat_msg msg;
    msg.role = "assistant";
    msg.content = generated_text;
    ctx.chat_history.push_back(std::move(msg));

    return 0;
}
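
// Format a new message against the existing chat history using the chat
// template, append it to the history, and return the formatted delta text.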
static std::string chat_add_and_format(mtmd_cli_context & ctx, common_chat_msg & new_msg) {
    LOG_DBG("chat_add_and_format: new_msg.role='%s', new_msg.content='%s'\n",
        new_msg.role.c_str(), new_msg.content.c_str());
    auto formatted = common_chat_format_single(ctx.tmpls.get(), ctx.chat_history,
        new_msg, new_msg.role == "user",
        ctx.use_jinja);
    ctx.chat_history.push_back(new_msg);
    return formatted;
}
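
// Format and tokenize a user message together with any pending media bitmaps
// into mtmd chunks, then evaluate the chunks and advance n_past.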
static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg) {
    bool add_bos = ctx.chat_history.empty();
    auto formatted_chat = chat_add_and_format(ctx, msg);
    LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.c_str());

    mtmd_input_text text;
    text.text = formatted_chat.c_str();
    text.add_special = add_bos;
    text.parse_special = true;

    if (g_is_interrupted) return 0;

    mtmd::input_chunks chunks(mtmd_input_chunks_init());
    auto bitmaps_c_ptr = ctx.bitmaps.c_ptr();
    int32_t res = mtmd_tokenize(ctx.ctx_vision.get(),
                        chunks.ptr.get(), // output
                        &text, // text
                        bitmaps_c_ptr.data(),
                        bitmaps_c_ptr.size());
    if (res != 0) {
        LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
        return 1;
    }

    ctx.bitmaps.entries.clear();

    llama_pos new_n_past;
    if (mtmd_helper_eval_chunks(ctx.ctx_vision.get(),
                ctx.lctx, // lctx
                chunks.ptr.get(), // chunks
                ctx.n_past, // n_past
                0, // seq_id
                ctx.n_batch, // n_batch
                true, // logits_last
                &new_n_past)) {
        LOG_ERR("Unable to eval prompt\n");
        return 1;
    }

    ctx.n_past = new_n_past;

    LOG("\n");

    return 0;
}
int main(int argc, char ** argv) {
    ggml_time_init();

    common_params params;
    params.sampling.temp = 0.2; // lower temp by default for better quality

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MTMD, show_additional_info)) {
        return 1;
    }

    common_init();
    mtmd_helper_log_set(common_log_default_callback, nullptr);

    if (params.mmproj.path.empty()) {
        show_additional_info(argc, argv);
        LOG_ERR("ERR: Missing --mmproj argument\n");
        return 1;
    }

    mtmd_cli_context ctx(params);
    LOG_INF("%s: loading model: %s\n", __func__, params.model.path.c_str());

    bool is_single_turn = !params.prompt.empty() && !params.image.empty();

    int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;

    // Ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }

    if (g_is_interrupted) return 130;
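
    // Single-turn mode: a prompt and media files were given on the command line;
    // add media markers if the prompt has none, load the media, evaluate the
    // message and generate one reply.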
    if (is_single_turn) {
        g_is_generating = true;
        if (params.prompt.find(mtmd_default_marker()) == std::string::npos) {
            for (size_t i = 0; i < params.image.size(); i++) {
                params.prompt += mtmd_default_marker();
            }
        }
        common_chat_msg msg;
        msg.role = "user";
        msg.content = params.prompt;
        for (const auto & image : params.image) {
            if (!ctx.load_media(image)) {
                return 1; // error is already printed by libmtmd
            }
        }
        if (eval_message(ctx, msg)) {
            return 1;
        }
        if (!g_is_interrupted && generate_response(ctx, n_predict)) {
            return 1;
        }
    } else {
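        // Interactive chat mode: /image and /audio queue media for the next
        // message, /clear resets the history and KV cache, /quit or /exit
        // leaves; any other input is sent to the model as a user message.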
  284. LOG("\n Running in chat mode, available commands:");
  285. if (mtmd_support_vision(ctx.ctx_vision.get())) {
  286. LOG("\n /image <path> load an image");
  287. }
  288. if (mtmd_support_audio(ctx.ctx_vision.get())) {
  289. LOG("\n /audio <path> load an audio");
  290. }
  291. LOG("\n /clear clear the chat history");
  292. LOG("\n /quit or /exit exit the program");
  293. LOG("\n");

        std::string content;

        while (!g_is_interrupted) {
            g_is_generating = false;
            LOG("\n> ");
            console::set_display(console::user_input);
            std::string line;
            console::readline(line, false);
            if (g_is_interrupted) break;
            console::set_display(console::reset);
            line = string_strip(line);
            if (line.empty()) {
                continue;
            }
            if (line == "/quit" || line == "/exit") {
                break;
            }
            if (line == "/clear") {
                ctx.n_past = 0;
                ctx.chat_history.clear();
                llama_memory_clear(llama_get_memory(ctx.lctx), true);
                LOG("Chat history cleared\n\n");
                continue;
            }
            g_is_generating = true;
            bool is_image = line == "/image" || line.find("/image ") == 0;
            bool is_audio = line == "/audio" || line.find("/audio ") == 0;
            if (is_image || is_audio) {
                if (line.size() < 8) {
                    LOG_ERR("ERR: Missing media filename\n");
                    continue;
                }
                std::string media_path = line.substr(7);
                if (ctx.load_media(media_path)) {
                    LOG("%s %s loaded\n", media_path.c_str(), is_image ? "image" : "audio");
                    content += mtmd_default_marker();
                }
                // else, error is already printed by libmtmd
                continue;
            } else {
                content += line;
            }
            common_chat_msg msg;
            msg.role = "user";
            msg.content = content;
            int ret = eval_message(ctx, msg);
            if (ret) {
                return 1;
            }
            if (g_is_interrupted) break;
            if (generate_response(ctx, n_predict)) {
                return 1;
            }
            content.clear();
        }
    }

    if (g_is_interrupted) LOG("\nInterrupted by user\n");

    LOG("\n\n");
    llama_perf_context_print(ctx.lctx);
    return g_is_interrupted ? 130 : 0;
}