// qwen2vl-cli.cpp

#include "arg.h"
#include "base64.hpp"
#include "log.h"
#include "common.h"
#include "sampling.h"
#include "clip.h"
#include "llava.h"
#include "llama.h"
#include "ggml.h"

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif
// these headers are only needed by the debug helpers guarded below
#ifndef NDEBUG
#include "ggml-alloc.h"
#include "ggml-backend.h"
#endif
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
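
// Evaluate an image embedding with Qwen2-VL's M-RoPE position ids. Each image
// token carries four position values, stored as four consecutive planes of
// n_image_pos entries: [temporal | row (y) | column (x) | unused (zero)].
// E.g. for a 2x2 patch grid starting at position p, the planes are
// [p,p,p,p | p,p,p+1,p+1 | p,p+1,p,p+1 | 0,0,0,0].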
static bool qwen2vl_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed,
                                     int n_batch, int * n_past, int * st_pos_id, struct clip_image_size * image_size) {
    int n_embd = llama_model_n_embd(llama_get_model(ctx_llama));
    const int patch_size = 14 * 2;
    const int ph = image_size->height / patch_size + (image_size->height % patch_size > 0);
    const int pw = image_size->width  / patch_size + (image_size->width  % patch_size > 0);
    auto img_tokens = image_embed->n_image_pos;
    // llama_pos mrope_pos[img_tokens * 4];
    std::vector<llama_pos> mrope_pos;
    mrope_pos.resize(img_tokens * 4);

    for (int y = 0; y < ph; y++) {
        for (int x = 0; x < pw; x++) {
            int i = y * pw + x;
            mrope_pos[i]                  = *st_pos_id;
            mrope_pos[i + img_tokens]     = *st_pos_id + y;
            mrope_pos[i + img_tokens * 2] = *st_pos_id + x;
            mrope_pos[i + img_tokens * 3] = 0;
        }
    }
    *st_pos_id += std::max(pw, ph);

    int processed = 0;
    std::vector<llama_pos> batch_mrope_pos;
    batch_mrope_pos.resize(img_tokens * 4);

    for (int i = 0; i < img_tokens; i += n_batch) {
        int n_eval = img_tokens - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }

        // llama_pos batch_mrope_pos[n_eval * 4];
        std::fill(batch_mrope_pos.begin(), batch_mrope_pos.end(), 0);
        memcpy(batch_mrope_pos.data(), &mrope_pos[processed], n_eval * sizeof(llama_pos));
        memcpy(&batch_mrope_pos[n_eval * 1], &mrope_pos[img_tokens * 1 + processed], n_eval * sizeof(llama_pos));
        memcpy(&batch_mrope_pos[n_eval * 2], &mrope_pos[img_tokens * 2 + processed], n_eval * sizeof(llama_pos));
        memcpy(&batch_mrope_pos[n_eval * 3], &mrope_pos[img_tokens * 3 + processed], n_eval * sizeof(llama_pos));

        llama_batch batch = {
            int32_t(n_eval),                    // n_tokens
            nullptr,                            // token
            (image_embed->embed + i * n_embd),  // embed
            batch_mrope_pos.data(),             // pos
            nullptr,                            // n_seq_id
            nullptr,                            // seq_id
            nullptr,                            // logits
        };

        if (llama_decode(ctx_llama, batch)) {
            LOG_ERR("%s : failed to eval\n", __func__);
            return false;
        }
        *n_past += n_eval;
        processed += n_eval;
    }
    return true;
}
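
// Decode plain text tokens in batches. Text uses linear positions, so the same
// position id is replicated across the first three M-RoPE sections; the fourth
// section stays zero.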
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past, int * st_pos_id) {
    int N = (int) tokens.size();
    std::vector<llama_pos> pos;
    for (int i = 0; i < N; i += n_batch) {
        int n_eval = (int) tokens.size() - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        auto batch = llama_batch_get_one(&tokens[i], n_eval);
        // TODO: add mrope pos ids somewhere else
        pos.resize(batch.n_tokens * 4);
        std::fill(pos.begin(), pos.end(), 0);
        for (int j = 0; j < batch.n_tokens * 3; j ++) {
            pos[j] = *st_pos_id + (j % batch.n_tokens);
        }
        batch.pos = pos.data();

        if (llama_decode(ctx_llama, batch)) {
            LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
        *st_pos_id += n_eval;
    }
    return true;
}
static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past, int * st_pos_id) {
    std::vector<llama_token> tokens;
    tokens.push_back(id);
    return eval_tokens(ctx_llama, tokens, 1, n_past, st_pos_id);
}
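
// Tokenize a string and evaluate the resulting tokens, optionally prepending BOS.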
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, int * st_pos_id, bool add_bos){
    std::string              str2     = str;
    std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
    // propagate the result instead of unconditionally returning true
    return eval_tokens(ctx_llama, embd_inp, n_batch, n_past, st_pos_id);
}
static const char * sample(struct common_sampler * smpl,
                           struct llama_context * ctx_llama,
                           int * n_past, int * st_pos_id) {
    const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
    common_sampler_accept(smpl, id, true);

    const llama_model * model = llama_get_model(ctx_llama);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    static std::string ret;
    if (llama_vocab_is_eog(vocab, id)) {
        ret = "</s>";
    } else {
        ret = common_token_to_piece(ctx_llama, id);
    }
    eval_id(ctx_llama, id, n_past, st_pos_id);
    return ret.c_str();
}
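
// Allow embedding the image directly in the prompt as a base64-encoded JPEG
// wrapped in an <img src="data:image/jpeg;base64,..."> tag.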
static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
static const char* IMG_BASE64_TAG_END = "\">";

static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
    begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
    end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
}

static bool prompt_contains_image(const std::string& prompt) {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    return (begin != std::string::npos);
}
// decodes the base64 image tag in the prompt into an image embedding
static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
    size_t img_base64_str_start, img_base64_str_end;
    find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
    if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
        LOG_ERR("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
        return NULL;
    }

    auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
    auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
    auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count);

    auto required_bytes = base64::required_encode_size(base64_str.size());
    auto img_bytes = std::vector<unsigned char>(required_bytes);
    base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());

    auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
    if (!embed) {
        LOG_ERR("%s: could not load image from base64 string.\n", __func__);
        return NULL;
    }

    return embed;
}
// replaces the base64 image tag in the prompt with `replacement`
static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    if (begin == std::string::npos || end == std::string::npos) {
        return prompt;
    }
    auto pre = prompt.substr(0, begin);
    auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
    return pre + replacement + post;
}
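
// Bundles the clip (vision) context and the llama (language) context that
// together form the multimodal pipeline.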
struct llava_context {
    struct clip_ctx * ctx_clip = NULL;
    struct llama_context * ctx_llama = NULL;
    struct llama_model * model = NULL;
};

static void print_usage(int, char ** argv) {
    LOG("\n example usage:\n");
    LOG("\n     %s -m <qwen2-vl/ggml-model-q5_k.gguf> --mmproj <qwen2-vl/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
}
static struct llava_image_embed * load_image(llava_context * ctx_llava, common_params * params, const std::string & fname) {
    // load and preprocess the image
    llava_image_embed * embed = NULL;
    auto prompt = params->prompt;
    if (prompt_contains_image(prompt)) {
        if (!params->image.empty()) {
            LOG_INF("using base64 encoded image instead of command line image path\n");
        }
        embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->cpuparams.n_threads, prompt);
        if (!embed) {
            LOG_ERR("%s: can't load image from prompt\n", __func__);
            return NULL;
        }
        params->prompt = remove_image_from_prompt(prompt);
    } else {
        embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->cpuparams.n_threads, fname.c_str());
        if (!embed) {
            fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str());
            return NULL;
        }
    }

    return embed;
}
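
// Evaluate the full conversation: the system prompt, then the image embedding
// (inserted at the <|vision_start|> marker), then the user prompt, and finally
// sample the response token by token.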
static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, common_params * params, const std::string & prompt) {
    int n_past = 0;
    int cur_pos_id = 0;

    const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;

    std::string system_prompt, user_prompt;
    size_t image_pos = prompt.find("<|vision_start|>");
    if (image_pos != std::string::npos) {
        // new templating mode: provide the full prompt including the system message;
        // the image embedding is inserted right after the <|vision_start|> marker
        const size_t marker_len = std::string("<|vision_start|>").length();
        system_prompt = prompt.substr(0, image_pos + marker_len);
        user_prompt = prompt.substr(image_pos + marker_len);
        LOG_INF("system_prompt: %s\n", system_prompt.c_str());
        if (params->verbose_prompt) {
            auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
        LOG_INF("user_prompt: %s\n", user_prompt.c_str());
        if (params->verbose_prompt) {
            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
    } else {
        // default mode: wrap the prompt in the Qwen2-VL chat template
        system_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|>";
        user_prompt = "<|vision_end|>" + prompt + "<|im_end|>\n<|im_start|>assistant\n";
        if (params->verbose_prompt) {
            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
    }
    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, &cur_pos_id, true);
    if (image_embed != nullptr) {
        auto image_size = clip_get_load_image_size(ctx_llava->ctx_clip);
        qwen2vl_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past, &cur_pos_id, image_size);
    }
    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, &cur_pos_id, false);

    // generate the response

    LOG("\n");

    struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
    if (!smpl) {
        LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
        exit(1);
    }

    std::string response = "";
    for (int i = 0; i < max_tgt_len; i++) {
        const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past, &cur_pos_id);
        response += tmp;
        if (strcmp(tmp, "</s>") == 0) break;
        if (strstr(tmp, "###")) break; // Yi-VL behavior
        LOG("%s", tmp);
        if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
        if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
        if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
        fflush(stdout);
    }

    common_sampler_free(smpl);
    LOG("\n");
}
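
// Load the language model; the vision (mmproj) model is loaded separately in
// llava_init_context.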
static struct llama_model * llava_init(common_params * params) {
    llama_backend_init();
    llama_numa_init(params->numa);

    llama_model_params model_params = common_model_params_to_llama(*params);

    llama_model * model = llama_model_load_from_file(params->model.path.c_str(), model_params);
    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n" , __func__);
        return NULL;
    }
    return model;
}
static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
    const char * clip_path = params->mmproj.path.c_str();

    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }

    auto ctx_clip = clip_model_load(clip_path, GGML_LOG_LEVEL_INFO);
    if (ctx_clip == NULL) {
        LOG_ERR("%s: failed to load clip model from %s\n", __func__, clip_path);
        return NULL;
    }

    llama_context_params ctx_params = common_context_params_to_llama(*params);
    ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings

    llama_context * ctx_llama = llama_init_from_model(model, ctx_params);
    if (ctx_llama == NULL) {
        LOG_ERR("%s: failed to create the llama_context\n" , __func__);
        return NULL;
    }

    auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->ctx_clip = ctx_clip;
    ctx_llava->model = model;
    return ctx_llava;
}
static void llava_free(struct llava_context * ctx_llava) {
    if (ctx_llava->ctx_clip) {
        clip_free(ctx_llava->ctx_clip);
        ctx_llava->ctx_clip = NULL;
    }

    llama_free(ctx_llava->ctx_llama);
    llama_model_free(ctx_llava->model);
    llama_backend_free();
}
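
// Debug helpers (debug builds only): run a standalone 2D M-RoPE graph on a
// dummy tensor and dump the result so it can be compared against a reference
// implementation.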
#ifndef NDEBUG

static void debug_test_mrope_2d() {
    // 1. Initialize backend
    ggml_backend_t backend = NULL;
    std::string backend_name = "";
#ifdef GGML_USE_CUDA
    fprintf(stderr, "%s: using CUDA backend\n", __func__);
    backend = ggml_backend_cuda_init(0); // init device 0
    backend_name = "cuda";
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
    }
#endif
    // if there is no GPU backend, fall back to the CPU backend
    if (!backend) {
        backend = ggml_backend_cpu_init();
        backend_name = "cpu";
    }

    // Calculate the size needed to allocate
    size_t ctx_size = 0;
    ctx_size += 2 * ggml_tensor_overhead(); // tensors
    // no need to allocate anything else!

    // 2. Allocate `ggml_context` to store tensor data
    struct ggml_init_params params = {
        /*.mem_size   =*/ ctx_size,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true, // the tensors will be allocated later by ggml_backend_alloc_ctx_tensors()
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 128, 12, 30);
    ggml_set_name(inp_raw, "inp_raw");
    ggml_set_input(inp_raw);

    struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 30 * 4);
    ggml_set_name(pos, "pos");
    ggml_set_input(pos);

    std::vector<float> dummy_q;
    dummy_q.resize(128 * 12 * 30);
    std::fill(dummy_q.begin(), dummy_q.end(), 0.1f);
    // memcpy(inp_raw->data, dummy_q.data(), 128 * 12 * 30 * ggml_element_size(inp_raw));

    std::vector<int> pos_id;
    pos_id.resize(30 * 4);
    for (int i = 0; i < 30; i ++) {
        pos_id[i]      = i;
        pos_id[i + 30] = i + 10;
        pos_id[i + 60] = i + 20;
        pos_id[i + 90] = i + 30;
    }
    int sections[4] = {32, 32, 0, 0};

    // 3. Allocate a `ggml_backend_buffer` to store all tensors
    ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);

    // 4. Copy tensor data from main memory (RAM) to backend buffer
    ggml_backend_tensor_set(inp_raw, dummy_q.data(), 0, ggml_nbytes(inp_raw));
    ggml_backend_tensor_set(pos, pos_id.data(), 0, ggml_nbytes(pos));

    // 5. Create a `ggml_cgraph` for the multi-section rope operation
    struct ggml_cgraph * gf = NULL;
    struct ggml_context * ctx_cgraph = NULL;

    // create a temporary context to build the graph
    struct ggml_init_params params0 = {
        /*.mem_size   =*/ ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true, // the tensors will be allocated later by ggml_gallocr_alloc_graph()
    };
    ctx_cgraph = ggml_init(params0);
    gf = ggml_new_graph(ctx_cgraph);

    struct ggml_tensor * result0 = ggml_rope_multi(
        ctx_cgraph, inp_raw, pos, nullptr,
        128/2, sections, LLAMA_ROPE_TYPE_VISION, 32768, 1000000, 1,
        0, 1, 32, 1);

    // Add "result" tensor and all of its dependencies to the cgraph
    ggml_build_forward_expand(gf, result0);

    // 6. Create a `ggml_gallocr` for cgraph computation
    ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
    ggml_gallocr_alloc_graph(allocr, gf);

    // 7. Run the computation
    int n_threads = 1; // Optional: number of threads to perform some operations with multi-threading
    if (ggml_backend_is_cpu(backend)) {
        ggml_backend_cpu_set_n_threads(backend, n_threads);
    }
    ggml_backend_graph_compute(backend, gf);

    // 8. Retrieve results (output tensors)
    // in this example, the output tensor is always the last tensor in the graph
    struct ggml_tensor * result = result0;
    // struct ggml_tensor * result = gf->nodes[gf->n_nodes - 1];
    float * result_data = (float *)malloc(ggml_nbytes(result));
    // because the tensor data is stored in a device buffer, we need to copy it back to RAM
    ggml_backend_tensor_get(result, result_data, 0, ggml_nbytes(result));
    const std::string bin_file = "mrope_2d_" + backend_name + ".bin";
    std::ofstream outFile(bin_file, std::ios::binary);

    if (outFile.is_open()) {
        outFile.write(reinterpret_cast<const char*>(result_data), ggml_nbytes(result));
        outFile.close();
        std::cout << "Data successfully written to " + bin_file << std::endl;
    } else {
        std::cerr << "Error opening file!" << std::endl;
    }

    free(result_data);

    // 9. Free memory and exit
    ggml_free(ctx_cgraph);
    ggml_gallocr_free(allocr);
    ggml_free(ctx);
    ggml_backend_buffer_free(buffer);
    ggml_backend_free(backend);
}
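
// Encode a synthetic 56x56 gradient image with the clip model and dump the
// resulting embedding to img_embed.bin for offline comparison.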
static void debug_dump_img_embed(struct llava_context * ctx_llava) {
    int n_embd = llama_model_n_embd(llama_get_model(ctx_llava->ctx_llama));
    int ne = n_embd * 4;
    float vals[56 * 56 * 3];
    // float embd[ne];
    std::vector<float> embd;
    embd.resize(ne);

    for (int i = 0; i < 56*56; i++)
    {
        for (int c = 0; c < 3; c++)
            vals[i * 3 + c] = (float)(i % (56 * 56)) / (56*56);
    }

    clip_encode_float_image(ctx_llava->ctx_clip, 16, vals, 56, 56, embd.data());

    std::ofstream outFile("img_embed.bin", std::ios::binary);
    if (outFile.is_open()) {
        outFile.write(reinterpret_cast<const char*>(embd.data()), ne * sizeof(float));
        outFile.close();
        std::cout << "Data successfully written to img_embed.bin" << std::endl;
    } else {
        std::cerr << "Error opening file!" << std::endl;
    }
}

#endif
int main(int argc, char ** argv) {
    ggml_time_init();

    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
        return 1;
    }

    common_init();

    if (params.mmproj.path.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
        print_usage(argc, argv);
        return 1;
    }

    auto * model = llava_init(&params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to init llava model\n", __func__);
        return 1;
    }

    if (prompt_contains_image(params.prompt)) {
        auto * ctx_llava = llava_init_context(&params, model);

        auto * image_embed = load_image(ctx_llava, &params, "");

        // process the prompt
        process_prompt(ctx_llava, image_embed, &params, params.prompt);

        llama_perf_context_print(ctx_llava->ctx_llama);
        llava_image_embed_free(image_embed);
        ctx_llava->model = NULL;
        llava_free(ctx_llava);
#ifndef NDEBUG
    } else if (params.image[0].empty()) {
        auto ctx_llava = llava_init_context(&params, model);

        debug_test_mrope_2d();
        debug_dump_img_embed(ctx_llava);

        llama_perf_context_print(ctx_llava->ctx_llama);
        ctx_llava->model = NULL;
        llava_free(ctx_llava);
#endif
    } else {
        for (auto & image : params.image) {
            auto * ctx_llava = llava_init_context(&params, model);

            auto * image_embed = load_image(ctx_llava, &params, image);
            if (!image_embed) {
                LOG_ERR("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str());
                return 1;
            }

            // process the prompt
            process_prompt(ctx_llava, image_embed, &params, params.prompt);

            llama_perf_context_print(ctx_llava->ctx_llama);
            llava_image_embed_free(image_embed);
            ctx_llava->model = NULL;
            llava_free(ctx_llava);
        }
    }

    llama_model_free(model);

    return 0;
}