// gemma3-cli.cpp
#include "arg.h"
#include "log.h"
#include "common.h"
#include "sampling.h"
#include "clip.h"
#include "stb_image.h"
#include "llama.h"
#include "ggml.h"
#include "console.h"

#include <vector>
#include <limits.h>
#include <inttypes.h>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif
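
// set to true while a response is being generated; the SIGINT handler flips
// it back to false so that Ctrl+C interrupts the generation instead of
// exiting the program (see sigint_handler below)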
static bool g_is_generating = false;

/**
 * Please note that this is NOT production-ready code.
 * It is a playground for trying out the Gemma 3 vision capabilities.
 * For contributors: please keep this code simple and easy to understand.
 */
static void show_additional_info(int /*argc*/, char ** argv) {
    LOG(
        "Experimental CLI for using the Gemma 3 vision model\n\n"
        "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> -p <prompt>\n\n"
        "  -m and --mmproj are required\n"
        "  --image and -p are optional; if NOT provided, the CLI will run in chat mode\n",
        argv[0]
    );
}
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (g_is_generating) {
            g_is_generating = false;
        } else {
            console::cleanup();
            LOG("\nInterrupted by user\n");
            _exit(130);
        }
    }
}
#endif
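
// Bundles the per-session state: the llama model/context owned by llama_init,
// the CLIP context for the vision tower loaded from --mmproj, a reusable
// token batch, and n_past, the current position in the KV cache.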
struct gemma3_context {
    struct clip_ctx * ctx_clip = NULL;
    common_init_result llama_init;

    llama_model * model;
    llama_context * lctx;
    const llama_vocab * vocab;
    llama_batch batch;

    int n_threads = 1;
    llama_pos n_past = 0;

    gemma3_context(common_params & params) : llama_init(common_init_from_params(params)) {
        model = llama_init.model.get();
        lctx = llama_init.context.get();
        vocab = llama_model_get_vocab(model);
        n_threads = params.cpuparams.n_threads;
        batch = llama_batch_init(params.n_batch, 0, 1);
        init_clip_model(params);
    }

    void init_clip_model(common_params & params) {
        const char * clip_path = params.mmproj.path.c_str();
        ctx_clip = clip_model_load(clip_path, GGML_LOG_LEVEL_INFO);
        if (!ctx_clip) {
            LOG_ERR("Failed to load CLIP model from %s\n", clip_path);
            exit(1);
        }
    }

    ~gemma3_context() {
        clip_free(ctx_clip);
    }
};
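
// Builds a llama_batch that carries raw embeddings instead of token ids
// (common_batch_add only handles token ids). All n_tokens embeddings share a
// single sequence id, get consecutive positions starting at pos_0, and
// request no logits, since sampling only starts after the text that follows
// the image.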
struct decode_embd_batch {
    std::vector<llama_pos>      pos;
    std::vector<int32_t>        n_seq_id;
    std::vector<llama_seq_id>   seq_id_0;
    std::vector<llama_seq_id *> seq_ids;
    std::vector<int8_t>         logits;
    llama_batch batch;
    decode_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
        pos     .resize(n_tokens);
        n_seq_id.resize(n_tokens);
        seq_ids .resize(n_tokens + 1);
        logits  .resize(n_tokens);
        seq_id_0.resize(1);
        seq_id_0[0] = seq_id;
        seq_ids [n_tokens] = nullptr;
        batch = {
            /*n_tokens =*/ n_tokens,
            /*tokens   =*/ nullptr,
            /*embd     =*/ embd,
            /*pos      =*/ pos.data(),
            /*n_seq_id =*/ n_seq_id.data(),
            /*seq_id   =*/ seq_ids.data(),
            /*logits   =*/ logits.data(),
        };
        for (int i = 0; i < n_tokens; i++) {
            batch.pos     [i] = pos_0 + i;
            batch.n_seq_id[i] = 1;
            batch.seq_id  [i] = seq_id_0.data();
            batch.logits  [i] = false;
        }
    }
};
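
// Tokenize `input` and decode it, advancing ctx.n_past. With logits_last set,
// logits are requested for the final token so that sampling can start right
// after this call (used when closing a turn with "<start_of_turn>model\n").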
static int eval_text(gemma3_context & ctx, std::string input, bool logits_last = false) {
    llama_tokens tokens = common_tokenize(ctx.lctx, input, false, true);
    common_batch_clear(ctx.batch);
    for (llama_token & t : tokens) {
        common_batch_add(ctx.batch, t, ctx.n_past++, {0}, false);
    }
    if (logits_last) {
        ctx.batch.logits[ctx.batch.n_tokens - 1] = true;
    }
    // LOG("eval_text (n_tokens = %d): %s\n", (int)tokens.size(), input.c_str());
    if (llama_decode(ctx.lctx, ctx.batch)) {
        LOG_ERR("Failed to decode text\n");
        return 1;
    }
    return 0;
}
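
// Process one image: load it from disk, preprocess it with the CLIP frontend,
// encode it into n_tokens x n_embd floats, then feed those embeddings to the
// language model between the <start_of_image> and <end_of_image> markers.
// Returns 2 for an unreadable file so that chat mode can keep going, 1 for
// fatal errors, 0 on success.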
static int eval_image(gemma3_context & ctx, std::string & fname) {
    std::vector<float> image_embd_v;
    int n_embd = llama_model_n_embd(ctx.model);
    int n_tokens = 256; // Gemma 3 encodes every image into a fixed 256 embeddings
    image_embd_v.resize(n_tokens * n_embd);

    bool ok;
    struct clip_image_u8 * img_u8 = clip_image_u8_init();
    ok = clip_image_load_from_file(fname.c_str(), img_u8);
    if (!ok) {
        LOG_ERR("Unable to load image %s\n", fname.c_str());
        clip_image_u8_free(img_u8);
        return 2; // non-fatal error
    }

    clip_image_f32_batch batch_f32;
    ok = clip_image_preprocess(ctx.ctx_clip, img_u8, &batch_f32);
    if (!ok) {
        LOG_ERR("Unable to preprocess image\n");
        clip_image_f32_batch_free(&batch_f32);
        clip_image_u8_free(img_u8);
        return 1;
    }

    int64_t t0 = ggml_time_ms();
    LOG("Encoding image %s\n", fname.c_str());
    ok = clip_image_batch_encode(ctx.ctx_clip, ctx.n_threads, &batch_f32, image_embd_v.data());
    if (!ok) {
        LOG_ERR("Unable to encode image\n");
        clip_image_f32_batch_free(&batch_f32);
        clip_image_u8_free(img_u8);
        return 1;
    }
    LOG("Image encoded in %" PRId64 " ms\n", ggml_time_ms() - t0);

    clip_image_f32_batch_free(&batch_f32);
    clip_image_u8_free(img_u8);
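
    // Gemma 3 attends to the image embeddings bidirectionally, so causal
    // attention is disabled for the embedding decode below and restored right
    // after; the surrounding marker tokens are decoded causally as usual.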
    // decode image embeddings
    int64_t t1 = ggml_time_ms();
    eval_text(ctx, "<start_of_image>");
    llama_set_causal_attn(ctx.lctx, false);
    decode_embd_batch batch_img(image_embd_v.data(), n_tokens, ctx.n_past, 0);
    if (llama_decode(ctx.lctx, batch_img.batch)) {
        LOG_ERR("failed to decode image\n");
        return 1;
    }
    ctx.n_past += n_tokens;
    llama_set_causal_attn(ctx.lctx, true);
    eval_text(ctx, "<end_of_image>");
    LOG("Image decoded in %" PRId64 " ms\n", ggml_time_ms() - t1);
    return 0;
}
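
// Sample up to n_predict tokens, printing each piece as it arrives and
// feeding the token back through llama_decode so the model can continue.
// Stops early on an end-of-generation token or when g_is_generating is
// cleared by the Ctrl+C handler.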
static int generate_response(gemma3_context & ctx, common_sampler * smpl, int n_predict) {
    for (int i = 0; i < n_predict; i++) {
        if (!g_is_generating) {
            printf("\n");
            break;
        }

        // sample the next token and record it in the sampler history
        llama_token token_id = common_sampler_sample(smpl, ctx.lctx, -1);
        common_sampler_accept(smpl, token_id, true);
        if (llama_vocab_is_eog(ctx.vocab, token_id)) {
            printf("\n");
            break; // end of generation
        }

        printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
        fflush(stdout);

        // eval the token
        common_batch_clear(ctx.batch);
        common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
        if (llama_decode(ctx.lctx, ctx.batch)) {
            LOG_ERR("failed to decode token\n");
            return 1;
        }
    }
    return 0;
}
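
// Example invocations (model/image file names are placeholders):
//
//   # single-turn: describe one image and exit
//   llama-gemma3-cli -m gemma-3-4b-it.gguf --mmproj mmproj.gguf \
//       --image photo.jpg -p "Describe this image"
//
//   # chat mode: omit --image/-p, then use /image <path> interactively
//   llama-gemma3-cli -m gemma-3-4b-it.gguf --mmproj mmproj.gguf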
int main(int argc, char ** argv) {
    ggml_time_init();

    common_params params;
    params.sampling.temp = 0.2; // lower temp by default for better quality

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
        return 1;
    }

    common_init();

    if (params.mmproj.path.empty()) {
        show_additional_info(argc, argv);
        return 1;
    }

    gemma3_context ctx(params);
    printf("%s: %s\n", __func__, params.model.path.c_str());

    bool is_single_turn = !params.prompt.empty() && !params.image.empty();

    struct common_sampler * smpl = common_sampler_init(ctx.model, params.sampling);
    int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;

    // ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }
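
    // Gemma 3 prompt format: the conversation starts with <bos> and each turn
    // is wrapped as <start_of_turn>{user|model}\n ... <end_of_turn>; the code
    // below assembles this template incrementally via eval_text().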
    if (eval_text(ctx, "<bos>")) {
        return 1;
    }

    if (is_single_turn) {
        g_is_generating = true;
        if (eval_text(ctx, "<start_of_turn>user\n")) {
            return 1;
        }
        for (auto & fname : params.image) {
            if (eval_image(ctx, fname)) {
                return 1;
            }
        }
        if (eval_text(ctx, params.prompt + "<end_of_turn><start_of_turn>model\n", true)) {
            return 1;
        }
        if (generate_response(ctx, smpl, n_predict)) {
            return 1;
        }

    } else {
        LOG("\n Running in chat mode, available commands:");
        LOG("\n   /image <path>    load an image");
        LOG("\n   /clear           clear the chat history");
        LOG("\n   /quit or /exit   exit the program");
        LOG("\n");
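
        // Chat loop invariant: a user turn is already open whenever input is
        // read; each iteration closes it with the typed line, opens a model
        // turn, generates the reply, then opens the next user turn.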
        if (eval_text(ctx, "<start_of_turn>user\n")) {
            return 1;
        }

        while (true) {
            g_is_generating = false;
            LOG("\n> ");
            console::set_display(console::user_input);
            std::string line;
            console::readline(line, false);
            console::set_display(console::reset);
            line = string_strip(line);
            if (line.empty()) {
                continue;
            }
            if (line == "/quit" || line == "/exit") {
                break;
            }
            if (line == "/clear") {
                ctx.n_past = 1; // the BOS token stays at position 0
                llama_kv_self_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS, drop the rest
                // re-open the user turn so the loop invariant still holds
                if (eval_text(ctx, "<start_of_turn>user\n")) {
                    return 1;
                }
                LOG("Chat history cleared\n\n");
                continue;
            }
            g_is_generating = true;
            if (line.find("/image") == 0) {
                // take everything after the command as the path; substr(6)
                // stays in range even when a bare "/image" has no argument
                std::string image = string_strip(line.substr(6));
                int res = eval_image(ctx, image);
                if (res == 2) {
                    continue; // image not found
                }
                if (res) {
                    return 1;
                }
                continue;
            }
            if (eval_text(ctx, line + "<end_of_turn><start_of_turn>model\n", true)) {
                return 1;
            }
            if (generate_response(ctx, smpl, n_predict)) {
                return 1;
            }
            if (eval_text(ctx, "<end_of_turn><start_of_turn>user\n")) {
                return 1;
            }
        }
    }

    return 0;
}