// minicpmv-cli.cpp

#include "ggml.h"
#include "log.h"
#include "common.h"
#include "clip.h"
#include "llava.h"
#include "llama.h"

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

struct llava_context {
    struct clip_ctx * ctx_clip = NULL;
    struct llama_context * ctx_llama = NULL;
    struct llama_model * model = NULL;
};

static void show_additional_info(int /*argc*/, char ** argv) {
    LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
}

static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    LOG_TEE("%s", text);
}

static struct llama_model * llava_init(gpt_params * params) {
    llama_backend_init();
    llama_numa_init(params->numa);

    llama_model_params model_params = llama_model_params_from_gpt_params(*params);

    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
    if (model == NULL) {
        LOG_TEE("%s: error: unable to load model\n", __func__);
        return NULL;
    }
    return model;
}

static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }

    llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
    if (params->n_ctx < 2048) {
        // warn user here, "Image processing requires at least 2048 context, setting context to 2048"
        LOG_TEE("%s: warn: Image processing requires at least 2048 context, setting context to 2048\n", __func__);
        ctx_params.n_ctx = 2048;
    } else {
        ctx_params.n_ctx = params->n_ctx;
    }

    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
    if (ctx_llama == NULL) {
        LOG_TEE("%s: error: failed to create the llama_context\n", __func__);
        return NULL;
    }

    auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->model = model;
    return ctx_llava;
}

static void llava_free(struct llava_context * ctx_llava) {
    if (ctx_llava->ctx_clip) {
        clip_free(ctx_llava->ctx_clip);
        ctx_llava->ctx_clip = NULL;
    }

    llama_free(ctx_llava->ctx_llama);
    llama_free_model(ctx_llava->model);
    llama_backend_free();
}

static struct clip_ctx * clip_init_context(gpt_params * params) {
    const char * clip_path = params->mmproj.c_str();

    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }
    auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
    return ctx_clip;
}
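
// Feed a token sequence to the language model in chunks of at most n_batch
// tokens per llama_decode() call, advancing *n_past as the KV cache fills.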
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
    int N = (int) tokens.size();
    for (int i = 0; i < N; i += n_batch) {
        int n_eval = (int) tokens.size() - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
            LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
    }
    return true;
}

static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
    std::vector<llama_token> tokens;
    tokens.push_back(id);
    return eval_tokens(ctx_llama, tokens, 1, n_past);
}

static bool eval_string(struct llama_context * ctx_llama, const char * str, int n_batch, int * n_past, bool add_bos) {
    std::string str2 = str;
    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
    return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}
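
// Evaluate a single image slice: copy slice `idx` out of the concatenated
// embedding buffer (each slice occupies clip_n_patches() * clip_n_mmproj_embd()
// floats) into a temporary llava_image_embed and decode it.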
static void process_eval_image_embed(struct llava_context * ctx_llava, const struct llava_image_embed * embeds, int n_batch, int * n_past, int idx) {
    float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
    std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));

    auto slice_embed = (llava_image_embed*)malloc(sizeof(llava_image_embed));
    slice_embed->embed = image_embed;
    slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
    llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
    llava_image_embed_free(slice_embed);
}
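
// Build the MiniCPM-V image prompt as implemented below: the overview embedding
// is wrapped in <image>...</image>; when the image was split into additional
// slices, they follow inside <slice>...</slice> as a grid of <image>...</image>
// blocks, with a newline appended after each row of the grid.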
static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, gpt_params * params, int & n_past) {
    std::string system_prompt;
    int idx = 0;
    int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
    system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
    LOG_TEE("%s: image token past: %d\n", __func__, n_past);
    eval_string(ctx_llava->ctx_llama, (system_prompt + "<image>").c_str(), params->n_batch, &n_past, false);
    process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
    eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
    if (num_image_embeds > 1) {
        size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
        eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
        for (size_t i = 0; i < (num_image_embeds - 1) / num_image_embeds_col; ++i) {
            for (size_t j = 0; j < num_image_embeds_col; ++j) {
                eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
                process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
                eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
                if (j == num_image_embeds_col - 1) {
                    eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
                }
            }
        }
        eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
    }
    LOG_TEE("%s: image token past: %d\n", __func__, n_past);
}
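
// Sample one token, feed it back through eval_id() so generation can continue,
// and return its text; end-of-generation tokens are reported as "</s>".
// Note: the returned pointer refers to a static buffer that is overwritten on
// the next call.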
static const char * sample(struct llama_sampling_context * ctx_sampling,
                           struct llama_context * ctx_llama,
                           int * n_past) {
    const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL);
    llama_sampling_accept(ctx_sampling, ctx_llama, id, true);
    static std::string ret;
    if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
        ret = "</s>";
    } else {
        ret = llama_token_to_piece(ctx_llama, id);
    }
    eval_id(ctx_llama, id, n_past);
    return ret.c_str();
}
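
// Per-image setup: load the CLIP/mmproj model, embed the image file, load the
// language model and create its context, then evaluate the image embedding so
// the following text generation can attend to it (n_past advances accordingly).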
static struct llava_context * minicpmv_init(gpt_params * params, const std::string & fname, int & n_past) {
    auto ctx_clip = clip_init_context(params);
    auto embeds = llava_image_embed_make_with_filename(ctx_clip, params->n_threads, fname.c_str());
    if (!embeds) {
        std::cerr << "error: failed to load image " << fname << ". Terminating\n\n";
        return NULL;
    }

    // process the prompt
    if (params->prompt.empty() && params->interactive == false) {
        LOG_TEE("prompt should be given or interactive mode should be on");
        return NULL;
    }

    auto model = llava_init(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
        return NULL;
    }

    const int64_t t_llava_init_start_us = ggml_time_us();
    auto ctx_llava = llava_init_context(params, model);
    ctx_llava->ctx_clip = ctx_clip;
    const int64_t t_llava_init_end_us = ggml_time_us();
    float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
    LOG_TEE("\n%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);

    const int64_t t_process_image_start_us = ggml_time_us();
    process_image(ctx_llava, embeds, params, n_past);
    const int64_t t_process_image_end_us = ggml_time_us();
    float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
    LOG_TEE("\n%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);

    llava_image_embed_free(embeds);
    return ctx_llava;
}
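
// Append the user turn using the Llama-3 style chat markers used throughout
// this example and open the assistant turn, then create a fresh sampling
// context for the response. When is_first is false a new user header is
// prepended to the prompt (the first turn was already opened by process_image).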
static struct llama_sampling_context * llama_init(struct llava_context * ctx_llava, gpt_params * params, std::string prompt, int & n_past, bool is_first = false) {
    std::string user_prompt = prompt;
    if (!is_first) user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;

    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
    eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);

    // generate the response
    LOG_TEE("\n");

    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
    return ctx_sampling;
}

static const char * llama_loop(struct llava_context * ctx_llava, struct llama_sampling_context * ctx_sampling, int & n_past) {
    const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
    return tmp;
}
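
// Each --image argument is processed independently with its own n_past / KV
// state. With -p the prompt is answered once per image; otherwise the example
// drops into an interactive loop reading prompts from stdin.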
int main(int argc, char ** argv) {
    ggml_time_init();

    gpt_params params;

    if (!gpt_params_parse(argc, argv, params)) {
        show_additional_info(argc, argv);
        return 1;
    }

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("llava", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
    llama_log_set(llama_log_callback_logTee, nullptr);
#endif // LOG_DISABLE_LOGS

    if (params.mmproj.empty() || (params.image.empty())) {
        gpt_params_print_usage(argc, argv, params);
        show_additional_info(argc, argv);
        return 1;
    }

    for (auto & image : params.image) {
        int n_past = 0;
        auto ctx_llava = minicpmv_init(&params, image, n_past);
        if (!ctx_llava) {
            // minicpmv_init already reported the error (bad image, missing prompt, or model load failure)
            return 1;
        }

        if (!params.prompt.empty()) {
            LOG_TEE("<user>%s\n", params.prompt.c_str());
            LOG_TEE("<assistant>");
            auto ctx_sampling = llama_init(ctx_llava, &params, params.prompt.c_str(), n_past, true);
            const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
            std::string response = "";
            bool have_tmp = false;
            for (int i = 0; i < max_tgt_len; i++) {
                auto tmp = llama_loop(ctx_llava, ctx_sampling, n_past);
                response += tmp;
                if (strcmp(tmp, "</s>") == 0) {
                    if (!have_tmp) continue;
                    else break;
                }
                if (strstr(tmp, "###")) break; // Yi-VL behavior
                have_tmp = true;
                printf("%s", tmp);
                if (strstr(response.c_str(), "<user>")) break; // minicpm-v

                fflush(stdout);
            }
            llama_sampling_free(ctx_sampling);
        } else {
            while (true) {
                LOG_TEE("<user>");
                std::string prompt;
                std::getline(std::cin, prompt);
                LOG_TEE("<assistant>");
                auto ctx_sampling = llama_init(ctx_llava, &params, prompt, n_past, true);
                const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
                std::string response = "";
                for (int i = 0; i < max_tgt_len; i++) {
                    auto tmp = llama_loop(ctx_llava, ctx_sampling, n_past);
                    response += tmp;
                    if (strcmp(tmp, "</s>") == 0) break;
                    if (strstr(tmp, "###")) break; // Yi-VL behavior
                    printf("%s", tmp); // mistral llava-1.6
                    if (strstr(response.c_str(), "<user>")) break; // minicpm-v
                    fflush(stdout);
                }
                llama_sampling_free(ctx_sampling);
            }
        }
        printf("\n");
        llama_print_timings(ctx_llava->ctx_llama);

        ctx_llava->model = NULL;
        llava_free(ctx_llava);
    }

    return 0;
}
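
// Example invocation (a sketch; model/image paths and the binary name are
// placeholders and depend on how the example was built):
//
//   ./minicpmv-cli -m <model.gguf> --mmproj <mmproj-model-f16.gguf> \
//       --image <image.jpg> --temp 0.1 -p "describe the image in detail."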