// main.cpp

#include "common.h"
#include "console.h"
#include "llama.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <random>   // std::mt19937, used for the seeded RNG below
#include <sstream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
static llama_context           ** g_ctx;
static llama_model             ** g_model;
static gpt_params               * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream       * g_output_ss;
static std::vector<llama_token> * g_output_tokens;

static bool is_interacting  = false;
static bool need_insert_eot = false;
static bool file_exists(const std::string & path) {
    std::ifstream f(path.c_str());
    return f.good();
}

static bool file_is_empty(const std::string & path) {
    std::ifstream f;
    f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
    f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
    return f.tellg() == 0;
}
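
// Note: file_is_empty() sets the failbit/badbit exception mask, so open() throws
// std::ios_base::failure if the file cannot be opened. Callers are expected to
// check file_exists() first, as the session-loading code in main() does.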
static void write_logfile(
    const llama_context * ctx, const gpt_params & params, const llama_model * model,
    const std::vector<llama_token> & input_tokens, const std::string & output,
    const std::vector<llama_token> & output_tokens
) {
    if (params.logdir.empty()) {
        return;
    }

    const std::string timestamp = string_get_sortable_timestamp();

    const bool success = fs_create_directory_with_parents(params.logdir);
    if (!success) {
        fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
                __func__, params.logdir.c_str());
        return;
    }

    const std::string logfile_path = params.logdir + timestamp + ".yml";
    FILE * logfile = fopen(logfile_path.c_str(), "w");

    if (logfile == NULL) {
        fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
        return;
    }

    fprintf(logfile, "binary: main\n");
    char model_desc[128];
    llama_model_desc(model, model_desc, sizeof(model_desc));
    yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc);

    fprintf(logfile, "\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "# Generation Results #\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "\n");

    yaml_dump_string_multiline(logfile, "output", output.c_str());
    yaml_dump_vector_int(logfile, "output_tokens", output_tokens);

    llama_dump_timing_info_yaml(logfile, ctx);
    fclose(logfile);
}
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (!is_interacting && g_params->interactive) {
            is_interacting  = true;
            need_insert_eot = true;
        } else {
            console::cleanup();
            printf("\n");
            llama_print_timings(*g_ctx);
            write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
            _exit(130);
        }
    }
}
#endif
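
// Ctrl+C semantics: the first SIGINT while the model is generating in interactive
// mode only flags is_interacting, so control returns to the user on the next
// iteration of the main loop. A second SIGINT (or any SIGINT outside interactive
// mode) flushes timings and logs, then exits with status 130 (128 + SIGINT),
// the usual shell convention for termination by Ctrl+C.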
static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    LOG_TEE("%s", text);
}

static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg> & chat_msgs, std::string role, std::string content) {
    llama_chat_msg new_msg{role, content};
    auto formatted = llama_chat_format_single(
        model, g_params->chat_template, chat_msgs, new_msg, role == "user");
    chat_msgs.push_back({role, content});
    return formatted;
}
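
// chat_add_and_format() returns only the *delta* produced by the new message:
// llama_chat_format_single applies the chat template to the history with and
// without the new message and returns the difference, so callers can tokenize
// and feed just the new part. The final argument (role == "user") is the
// add-assistant flag, which appends the template's assistant prompt so
// generation starts at the right place.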
int main(int argc, char ** argv) {
    gpt_params params;
    g_params = &params;

    if (!gpt_params_parse(argc, argv, params)) {
        gpt_params_print_usage(argc, argv, params);
        return 1;
    }

    llama_sampling_params & sparams = params.sparams;

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("main", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
    llama_log_set(llama_log_callback_logTee, nullptr);
#endif // LOG_DISABLE_LOGS

    // TODO: Dump params ?
    //LOG("Params perplexity: %s\n", LOG_TOSTR(params.perplexity));

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    console::init(params.simple_io, params.use_color);
    atexit([]() { console::cleanup(); });

    if (params.logits_all) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");
        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");
        return 0;
    }

    if (params.n_ctx != 0 && params.n_ctx < 8) {
        LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
        params.n_ctx = 8;
    }

    if (params.rope_freq_base != 0.0) {
        LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
    }

    if (params.rope_freq_scale != 0.0) {
        LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
    }

    LOG_TEE("%s: build = %d (%s)\n",      __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
    LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    LOG_TEE("%s: seed = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);

    LOG("%s: llama backend init\n", __func__);
    llama_backend_init();
    llama_numa_init(params.numa);
    llama_model * model;
    llama_context * ctx;
    llama_context * ctx_guidance = NULL;
    std::vector<llama_chat_msg> chat_msgs;
    g_model = &model;
    g_ctx   = &ctx;

    // load the model and apply lora adapter, if any
    LOG("%s: load the model and apply lora adapter, if any\n", __func__);
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (sparams.cfg_scale > 1.f) {
        struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
        ctx_guidance = llama_new_context_with_model(model, lparams);
    }

    if (model == NULL) {
        LOG_TEE("%s: error: unable to load model\n", __func__);
        return 1;
    }
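
    // A second llama_context is created only when classifier-free guidance is
    // active (cfg_scale > 1; a scale of 1 would be a no-op). It shares the model
    // weights with ctx and duplicates only the per-context state (KV cache), so
    // the negative prompt can be evaluated independently of the main context.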
    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx       = llama_n_ctx(ctx);
    LOG("n_ctx: %d\n", n_ctx);

    if (n_ctx > n_ctx_train) {
        LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
                __func__, n_ctx_train, n_ctx);
    }

    // print chat template example in conversation mode
    if (params.conversation) {
        if (params.enable_chat_template) {
            LOG_TEE("%s: chat template example: %s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
        } else {
            LOG_TEE("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
        }
    }

    // print system information
    {
        LOG_TEE("\n");
        LOG_TEE("%s\n", gpt_params_get_system_info(params).c_str());
    }
    std::string path_session = params.path_prompt_cache;
    std::vector<llama_token> session_tokens;

    if (!path_session.empty()) {
        LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
        if (!file_exists(path_session)) {
            LOG_TEE("%s: session file does not exist, will create.\n", __func__);
        } else if (file_is_empty(path_session)) {
            LOG_TEE("%s: The session file is empty. A new session will be initialized.\n", __func__);
        } else {
            // The file exists and is not empty
            session_tokens.resize(n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_state_load_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);
            LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
        }
    }
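
    // The prompt cache ("session") stores both the token list and the KV-cache
    // state. On a later run with the same prompt prefix, llama_state_load_file
    // restores that state and the matching prefix is skipped instead of being
    // re-decoded (see the n_matching_session_tokens logic below).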
    const bool add_bos = llama_should_add_bos_token(model);
    if (!llama_model_has_encoder(model)) {
        GGML_ASSERT(llama_add_eos_token(model) != 1);
    }
    LOG("add_bos: %d\n", add_bos);
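
    // add_bos comes from the model's tokenizer metadata. The assert above rejects
    // decoder-only models whose metadata requests an automatically appended EOS
    // token (llama_add_eos_token == 1), since this example never appends one to
    // the prompt.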
    std::vector<llama_token> embd_inp;

    {
        auto prompt = (params.conversation && params.enable_chat_template && !params.prompt.empty())
            ? chat_add_and_format(model, chat_msgs, "system", params.prompt) // format the system prompt in conversation mode
            : params.prompt;
        if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
            LOG("tokenize the prompt\n");
            embd_inp = ::llama_tokenize(ctx, prompt, true, true);
        } else {
            LOG("use session tokens\n");
            embd_inp = session_tokens;
        }

        LOG("prompt: \"%s\"\n", log_tostr(prompt));
        LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
    }

    // Should not run without any tokens
    if (embd_inp.empty()) {
        embd_inp.push_back(llama_token_bos(model));
        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
    }
    // Tokenize negative prompt
    std::vector<llama_token> guidance_inp;
    int guidance_offset     = 0;
    int original_prompt_len = 0;
    if (ctx_guidance) {
        LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));

        guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, true, true);
        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());

        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true, true);
        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());

        original_prompt_len = original_inp.size();
        guidance_offset = (int)guidance_inp.size() - original_prompt_len;
        LOG("original_prompt_len: %s", log_tostr(original_prompt_len));
        LOG("guidance_offset: %s", log_tostr(guidance_offset));
    }
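
    // guidance_offset is the length difference between the negative prompt and
    // the original prompt. For example, if the negative prompt tokenizes to 12
    // tokens and the prompt to 8, guidance_offset is 4: generated tokens sit 4
    // positions later in the guidance context, and the context-overflow check in
    // the main loop accounts for that.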
    if ((int) embd_inp.size() > n_ctx - 4) {
        LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // debug message about similarity of saved session, if applicable
    size_t n_matching_session_tokens = 0;
    if (!session_tokens.empty()) {
        for (llama_token id : session_tokens) {
            if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                break;
            }
            n_matching_session_tokens++;
        }
        if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
            LOG_TEE("%s: using full prompt from session file\n", __func__);
        } else if (n_matching_session_tokens >= embd_inp.size()) {
            LOG_TEE("%s: session file has exact match for prompt!\n", __func__);
        } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
            LOG_TEE("%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        } else {
            LOG_TEE("%s: session file matches %zu / %zu tokens of prompt\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        }

        // remove any "future" tokens that we might have inherited from the previous session
        llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1);
    }

    LOGLN(
        "recalculate the cached logits (check): embd_inp.empty() %s, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu",
        log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size());

    // if we will use the cache for the full prompt without reaching the end of the cache, force
    // reevaluation of the last token to recalculate the cached logits
    if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
        LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);

        session_tokens.resize(embd_inp.size() - 1);
    }
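
    // Restoring a session brings back the KV cache but not the logits of the
    // last prompt token, which sampling needs. Shrinking session_tokens by one
    // forces that final token through llama_decode again: e.g. for a 100-token
    // prompt fully covered by the cache, tokens 1-99 are reused and token 100 is
    // re-evaluated to produce fresh logits.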
    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
        params.n_keep = (int)embd_inp.size();
    } else {
        params.n_keep += add_bos; // always keep the BOS token
    }

    if (params.conversation) {
        params.interactive_first = true;
    }

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }
    if (params.verbose_prompt) {
        LOG_TEE("\n");
        LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
        }

        if (ctx_guidance) {
            LOG_TEE("\n");
            LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
            LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
            for (int i = 0; i < (int) guidance_inp.size(); i++) {
                LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
            }
        }

        if (params.n_keep > add_bos) {
            LOG_TEE("%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
            }
            LOG_TEE("'\n");
        }
        LOG_TEE("\n");
    }
    // ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }
    if (params.interactive) {
        LOG_TEE("%s: interactive mode on.\n", __func__);

        if (!params.antiprompt.empty()) {
            for (const auto & antiprompt : params.antiprompt) {
                LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
                if (params.verbose_prompt) {
                    auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
                    for (int i = 0; i < (int) tmp.size(); i++) {
                        LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                    }
                }
            }
        }

        if (params.input_prefix_bos) {
            LOG_TEE("Input prefix with BOS\n");
        }

        if (!params.input_prefix.empty()) {
            LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
            if (params.verbose_prompt) {
                auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }

        if (!params.input_suffix.empty()) {
            LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
            if (params.verbose_prompt) {
                auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }
    }
  382. LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
  383. LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
  384. LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
  385. // group-attention state
  386. // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
  387. int ga_i = 0;
  388. const int ga_n = params.grp_attn_n;
  389. const int ga_w = params.grp_attn_w;
  390. if (ga_n != 1) {
  391. GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
  392. GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
  393. //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
  394. //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
  395. LOG_TEE("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
  396. }
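
    // When ga_n > 1 the main loop uses Self-Extend-style grouped attention:
    // KV-cache positions beyond the training context are merged in groups of
    // ga_n within windows of ga_w tokens, letting the model address a longer
    // effective context without retraining. The position arithmetic lives in
    // the "context extension via Self-Extend" block further down.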
    LOG_TEE("\n\n");

    if (params.interactive) {
        const char * control_message;
        if (params.multiline_input) {
            control_message = " - To return control to the AI, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to the AI.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        LOG_TEE("== Running in interactive mode. ==\n");
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
        LOG_TEE(       " - Press Ctrl+C to interject at any time.\n");
#endif
        LOG_TEE(       "%s\n", control_message);

        is_interacting = params.interactive_first;
    }
    bool is_antiprompt        = false;
    bool input_echo           = true;
    bool display              = true;
    bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();

    int n_past             = 0;
    int n_remain           = params.n_predict;
    int n_consumed         = 0;
    int n_session_consumed = 0;
    int n_past_guidance    = 0;

    std::vector<int>   input_tokens;  g_input_tokens  = &input_tokens;
    std::vector<int>   output_tokens; g_output_tokens = &output_tokens;
    std::ostringstream output_ss;     g_output_ss     = &output_ss;
    std::ostringstream assistant_ss; // for storing current assistant message, used in conversation mode

    // the first thing we will do is to output the prompt, so set color accordingly
    console::set_display(console::prompt);
    display = params.display_prompt;

    std::vector<llama_token> embd;
    std::vector<llama_token> embd_guidance;

    // tokenized antiprompts
    std::vector<std::vector<llama_token>> antiprompt_ids;

    antiprompt_ids.reserve(params.antiprompt.size());
    for (const std::string & antiprompt : params.antiprompt) {
        antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
    }
    struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
    if (!ctx_sampling) {
        fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
        exit(1);
    }

    if (llama_model_has_encoder(model)) {
        int enc_input_size = embd_inp.size();
        llama_token * enc_input_buf = embd_inp.data();

        if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size, 0, 0))) {
            LOG_TEE("%s : failed to eval\n", __func__);
            return 1;
        }

        llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
        if (decoder_start_token_id == -1) {
            decoder_start_token_id = llama_token_bos(model);
        }

        embd_inp.clear();
        embd_inp.push_back(decoder_start_token_id);
    }
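
    // For encoder-decoder models (e.g. T5-style), the whole prompt goes through
    // the encoder exactly once; the decoder then starts from the model's declared
    // decoder-start token, falling back to BOS when the model does not define one
    // (llama_model_decoder_start_token returns -1). embd_inp is replaced by that
    // single token so the main loop below only drives the decoder.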
    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
        if (!embd.empty()) {
            // Note: (n_ctx - 4) here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;

            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if ((int) embd.size() > max_embd_size) {
                const int skipped_tokens = (int) embd.size() - max_embd_size;
                embd.resize(max_embd_size);

                console::set_display(console::error);
                printf("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
                console::set_display(console::reset);
                fflush(stdout);
            }

            if (ga_n == 1) {
                // infinite text generation via context shifting
                // if we run out of context:
                // - take the n_keep first tokens from the original prompt (via n_past)
                // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
                if (n_past + (int) embd.size() + std::max<int>(0, guidance_offset) >= n_ctx) {
                    if (params.n_predict == -2) {
                        LOG_TEE("\n\n%s: context full and n_predict == %d => stopping\n", __func__, params.n_predict);
                        break;
                    }

                    const int n_left    = n_past - params.n_keep;
                    const int n_discard = n_left/2;

                    LOG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                        n_past, n_left, n_ctx, params.n_keep, n_discard);

                    llama_kv_cache_seq_rm (ctx, 0, params.n_keep            , params.n_keep + n_discard);
                    llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);

                    n_past -= n_discard;

                    if (ctx_guidance) {
                        n_past_guidance -= n_discard;
                    }

                    LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);
                    LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
                    LOG("clear session path\n");
                    path_session.clear();
                }
            } else {
                // context extension via Self-Extend
                while (n_past >= ga_i + ga_w) {
                    const int ib = (ga_n*ga_i)/ga_w;
                    const int bd = (ga_w/ga_n)*(ga_n - 1);
                    const int dd = (ga_w/ga_n) - ib*bd - ga_w;

                    LOG("\n");
                    LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i, n_past, ib*bd, ga_i + ib*bd, n_past + ib*bd);
                    LOG("div:   [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
                    LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);

                    llama_kv_cache_seq_add(ctx, 0, ga_i,                n_past,              ib*bd);
                    llama_kv_cache_seq_div(ctx, 0, ga_i + ib*bd,        ga_i + ib*bd + ga_w, ga_n);
                    llama_kv_cache_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd,      dd);

                    n_past -= bd;

                    ga_i += ga_w/ga_n;

                    LOG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", n_past + bd, n_past, ga_i);
                }
            }
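
            // Worked example of the two strategies above (values illustrative):
            //
            // Context shifting (ga_n == 1): with n_ctx = 512 and n_keep = 2, once
            // n_past reaches 512 we get n_left = 510 and n_discard = 255. Cache
            // positions [2, 257) are removed, positions [257, 512) slide back by
            // 255, and n_past becomes 257, freeing half the window for new tokens.
            //
            // Self-Extend (ga_n > 1): with ga_n = 2, ga_w = 512 and ga_i = 0, the
            // first pass computes ib = 0, bd = 256, dd = -256. Positions [0, 512)
            // are divided by 2 to occupy [0, 256), the tail shifts back by 256,
            // and ga_i advances by ga_w/ga_n = 256, so two cached tokens now share
            // each position within the grouped window.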
            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        ++i;
                        break;
                    }
                }
                if (i > 0) {
                    embd.erase(embd.begin(), embd.begin() + i);
                }
            }
            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            if (ctx_guidance) {
                int input_size = 0;
                llama_token * input_buf = NULL;

                if (n_past_guidance < (int) guidance_inp.size()) {
                    // Guidance context should have the same data with these modifications:
                    //
                    // * Replace the initial prompt
                    // * Shift everything by guidance_offset
                    embd_guidance = guidance_inp;
                    if (embd.begin() + original_prompt_len < embd.end()) {
                        embd_guidance.insert(
                            embd_guidance.end(),
                            embd.begin() + original_prompt_len,
                            embd.end()
                        );
                    }

                    input_buf  = embd_guidance.data();
                    input_size = embd_guidance.size();

                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
                } else {
                    input_buf  = embd.data();
                    input_size = embd.size();
                }

                for (int i = 0; i < input_size; i += params.n_batch) {
                    int n_eval = std::min(input_size - i, params.n_batch);
                    if (llama_decode(ctx_guidance, llama_batch_get_one(input_buf + i, n_eval, n_past_guidance, 0))) {
                        LOG_TEE("%s : failed to eval\n", __func__);
                        return 1;
                    }

                    n_past_guidance += n_eval;
                }
            }
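
            // The guidance context mirrors the main context with the negative
            // prompt substituted for the original one. At sampling time the two
            // sets of logits are blended inside llama_sampling_sample (roughly
            // logits = guidance + cfg_scale * (logits - guidance), the usual
            // classifier-free guidance mix).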
            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }

                LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());

                if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
                    LOG_TEE("%s : failed to eval\n", __func__);
                    return 1;
                }

                n_past += n_eval;

                LOG("n_past = %d\n", n_past);
                // Display total tokens alongside total time
                if (params.n_print > 0 && n_past % params.n_print == 0) {
                    LOG_TEE("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
                }
            }

            if (!embd.empty() && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

        embd.clear();
        embd_guidance.clear();
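
        // embd only holds the tokens queued for this iteration; once decoded
        // they live on in the KV cache (tracked by n_past), while session_tokens
        // keeps a parallel copy of everything evaluated so the prompt cache can
        // be written back to disk later.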
        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // optionally save the session on first sample (for faster prompt loading next time)
            if (!path_session.empty() && need_to_save_session && !params.prompt_cache_ro) {
                need_to_save_session = false;
                llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());

                LOG("saved session to %s\n", path_session.c_str());
            }

            const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);

            llama_sampling_accept(ctx_sampling, ctx, id, /* apply_grammar= */ true);

            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());

            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;

            LOG("n_remain: %d\n", n_remain);
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);

                // push the prompt in the sampling context in order to apply repetition penalties later
                // for the prompt, we don't apply grammar rules
                llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], /* apply_grammar= */ false);

                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }
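
        // Two phases alternate here: while prompt or user tokens remain
        // (n_consumed < embd_inp.size()) they are moved into embd in batches of
        // up to n_batch and fed to the sampler's history only (no grammar);
        // once everything is consumed, exactly one new token is sampled per
        // loop iteration and queued for decoding.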
        // display text
        if (input_echo && display) {
            for (auto id : embd) {
                const std::string token_str = llama_token_to_piece(ctx, id, params.special);

                // Console/Stream Output
                fprintf(stdout, "%s", token_str.c_str());

                // Record Displayed Tokens To Log
                // Note: Generated tokens are created one by one hence this check
                if (embd.size() > 1) {
                    // Incoming Requested Tokens
                    input_tokens.push_back(id);
                } else {
                    // Outgoing Generated Tokens
                    output_tokens.push_back(id);
                    output_ss << token_str;
                }

                fflush(stdout);
            }
        }

        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console::set_display(console::reset);
            display = true;
        }
        // if not currently processing queued inputs
        if ((int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt in the last n_prev tokens
            if (!params.antiprompt.empty()) {
                const int n_prev = 32;
                const std::string last_output = llama_sampling_prev_str(ctx_sampling, ctx, n_prev);

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
                // so we'll compensate for that by widening the search window a bit.
                for (std::string & antiprompt : params.antiprompt) {
                    size_t extra_padding = params.interactive ? 0 : 2;
                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
                        : 0;

                    if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
                        if (params.interactive) {
                            is_interacting = true;
                        }
                        is_antiprompt = true;
                        break;
                    }
                }

                // check for reverse prompt using special tokens
                llama_token last_token = llama_sampling_last(ctx_sampling);
                for (std::vector<llama_token> ids : antiprompt_ids) {
                    if (ids.size() == 1 && last_token == ids[0]) {
                        if (params.interactive) {
                            is_interacting = true;
                        }
                        is_antiprompt = true;
                        break;
                    }
                }

                if (is_antiprompt) {
                    LOG("found antiprompt: %s\n", last_output.c_str());
                }
            }
            // deal with end of generation tokens in interactive mode
            if (llama_token_is_eog(model, llama_sampling_last(ctx_sampling))) {
                LOG("found an EOG token\n");

                if (params.interactive) {
                    if (!params.antiprompt.empty()) {
                        // tokenize and inject first reverse prompt
                        const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
                        embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                        is_antiprompt = true;
                    }

                    if (params.enable_chat_template) {
                        chat_add_and_format(model, chat_msgs, "assistant", assistant_ss.str());
                    }
                    is_interacting = true;
                    printf("\n");
                }
            }

            // if current token is not EOG, we add it to current assistant message
            if (params.conversation) {
                auto id = llama_sampling_last(ctx_sampling);
                assistant_ss << llama_token_to_piece(ctx, id, false);
            }
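
            // llama_token_to_piece is called with special == false here, so
            // special tokens (including EOG) should render as empty text and
            // not leak into the accumulated assistant message.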
            if (n_past > 0 && is_interacting) {
                LOG("waiting for user input\n");

                if (params.conversation) {
                    printf("\n> ");
                }

                if (params.input_prefix_bos) {
                    LOG("adding input prefix BOS token\n");
                    embd_inp.push_back(llama_token_bos(model));
                }

                std::string buffer;
                if (!params.input_prefix.empty() && !params.conversation) {
                    LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
                    printf("%s", params.input_prefix.c_str());
                }

                // color user input only
                console::set_display(console::user_input);
                display = params.display_prompt;

                std::string line;
                bool another_line = true;
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console::set_display(console::reset);
                display = true;

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // append input suffix if any
                    if (!params.input_suffix.empty() && !params.conversation) {
                        LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
                        printf("%s", params.input_suffix.c_str());
                    }

                    LOG("buffer: '%s'\n", buffer.c_str());

                    const size_t original_size = embd_inp.size();

                    if (params.escape) {
                        string_process_escapes(buffer);
                    }

                    bool format_chat = params.conversation && params.enable_chat_template;
                    std::string user_inp = format_chat
                        ? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
                        : std::move(buffer);
                    // TODO: one inconvenience of the current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
                    const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
                    const auto line_inp = ::llama_tokenize(ctx, user_inp,            false, format_chat);
                    const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());

                    // if the user stops generation mid-way, we must add EOT to finish the model's last response
                    if (need_insert_eot && format_chat) {
                        llama_token eot = llama_token_eot(model);
                        embd_inp.push_back(eot == -1 ? llama_token_eos(model) : eot);
                        need_insert_eot = false;
                    }

                    embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
                    embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());

                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
                        const llama_token token = embd_inp[i];
                        output_tokens.push_back(token);
                        output_ss << llama_token_to_piece(ctx, token);
                    }

                    // reset assistant message
                    assistant_ss.str("");

                    n_remain -= line_inp.size();
                    LOG("n_remain: %d\n", n_remain);
                } else {
                    LOG("empty line, passing control back\n");
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                if (is_interacting) {
                    llama_sampling_reset(ctx_sampling);
                }
                is_interacting = false;
            }
        }
        // end of generation
        if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.interactive)) {
            LOG_TEE(" [end of text]\n");
            break;
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
        LOG_TEE("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
        llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
    }

    llama_print_timings(ctx);
    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);

    if (ctx_guidance) { llama_free(ctx_guidance); }
    llama_free(ctx);
    llama_free_model(model);

    llama_sampling_free(ctx_sampling);
    llama_backend_free();

#ifndef LOG_DISABLE_LOGS
    LOG_TEE("Log end\n");
#endif // LOG_DISABLE_LOGS

    return 0;
}