main.cpp

// Defines sigaction on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#include <signal.h>
#endif
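// global state shared with the SIGINT handler below; g_ctx lets the handler
// print timings for the active llama_context before exiting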
static console_state con_st;
static llama_context ** g_ctx;

static bool is_interacting = false;

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
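// the first Ctrl+C interrupts generation and hands control back to the user;
// a second Ctrl+C while already interacting cleans up the console and exits
// with status 130 (128 + SIGINT)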
void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            console_cleanup(con_st);
            printf("\n");
            llama_print_timings(*g_ctx);
            _exit(130);
        }
    }
}
#endif
int main(int argc, char ** argv) {
    gpt_params params;

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    con_st.use_color = params.use_color;
    con_st.multiline_input = params.multiline_input;
    console_init(con_st);
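    // make sure the console state is restored even when we return early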
    atexit([]() { console_cleanup(con_st); });

    if (params.perplexity) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");
        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");
        return 0;
    }
    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }
    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_init_backend();

    llama_context * ctx;
    g_ctx = &ctx;

    // load the model and apply lora adapter, if any
    ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters
    // uncomment the "used_mem" line in llama.cpp to see the results
    if (params.mem_test) {
        {
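            // worst case for prompt processing: a full batch of BOS tokens evaluated at position 0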
            const std::vector<llama_token> tmp(params.n_batch, llama_token_bos());
            llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
        }

        {
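            // worst case for generation: a single token evaluated at the last position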
            const std::vector<llama_token> tmp = { 0, };
            llama_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads);
        }

        llama_print_timings(ctx);
        llama_free(ctx);

        return 0;
    }

    std::string path_session = params.path_prompt_cache;
    std::vector<llama_token> session_tokens;

    if (!path_session.empty()) {
        fprintf(stderr, "%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());

        // fopen to check for existing session
        FILE * fp = std::fopen(path_session.c_str(), "rb");
        if (fp != NULL) {
            std::fclose(fp);
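            // reserve space for a full context worth of tokens; llama_load_session_file
            // reports back how many tokens the session file actually holds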
            session_tokens.resize(params.n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                fprintf(stderr, "%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);
            llama_set_rng_seed(ctx, params.seed);

            fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n", __func__, (int) session_tokens.size());
        } else {
            fprintf(stderr, "%s: session file does not exist, will create\n", __func__);
        }
    }

    // tokenize the prompt
    std::vector<llama_token> embd_inp;
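    // reuse the session tokens verbatim only when no prompt was given and
    // nothing (interactive or instruct mode) forces fresh user input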
    if (params.interactive_first || params.instruct || !params.prompt.empty() || session_tokens.empty()) {
        // Add a space in front of the first character to match OG llama tokenizer behavior
        params.prompt.insert(0, 1, ' ');

        embd_inp = ::llama_tokenize(ctx, params.prompt, true);
    } else {
        embd_inp = session_tokens;
    }

    const int n_ctx = llama_n_ctx(ctx);
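    // keep at least 4 tokens of headroom in the context for generation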
    if ((int) embd_inp.size() > n_ctx - 4) {
        fprintf(stderr, "%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // debug message about similarity of saved session, if applicable
    size_t n_matching_session_tokens = 0;
    if (session_tokens.size()) {
        for (llama_token id : session_tokens) {
            if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                break;
            }
            n_matching_session_tokens++;
        }
        if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
            fprintf(stderr, "%s: using full prompt from session file\n", __func__);
        } else if (n_matching_session_tokens >= embd_inp.size()) {
            fprintf(stderr, "%s: session file has exact match for prompt!\n", __func__);
        } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
            fprintf(stderr, "%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        } else {
            fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        }
    }

    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size() || params.instruct) {
        params.n_keep = (int) embd_inp.size();
    }

    // prefix & suffix for instruct mode
    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true);
    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);

    // in instruct mode, we inject a prefix and a suffix to each input by the user
    if (params.instruct) {
        params.interactive_first = true;
        params.antiprompt.push_back("### Instruction:\n\n");
    }

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    // determine newline token
    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        if (params.n_keep > 0) {
            fprintf(stderr, "%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]));
            }
            fprintf(stderr, "'\n");
        }
        fprintf(stderr, "\n");
    }

    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
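        // route Ctrl+C on Windows through the same handler used for SIGINT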
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(static_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

        fprintf(stderr, "%s: interactive mode on.\n", __func__);

        if (params.antiprompt.size()) {
            for (auto antiprompt : params.antiprompt) {
                fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
            }
        }

        if (!params.input_prefix.empty()) {
            fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str());
        }

        if (!params.input_suffix.empty()) {
            fprintf(stderr, "Input suffix: '%s'\n", params.input_suffix.c_str());
        }
    }
    fprintf(stderr, "sampling: repeat_last_n = %d, repeat_penalty = %f, presence_penalty = %f, frequency_penalty = %f, top_k = %d, tfs_z = %f, top_p = %f, typical_p = %f, temp = %f, mirostat = %d, mirostat_lr = %f, mirostat_ent = %f\n",
            params.repeat_last_n, params.repeat_penalty, params.presence_penalty, params.frequency_penalty, params.top_k, params.tfs_z, params.top_p, params.typical_p, params.temp, params.mirostat, params.mirostat_eta, params.mirostat_tau);
    fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
    fprintf(stderr, "\n\n");

    // TODO: replace with ring-buffer
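    // sliding window of the most recent n_ctx tokens, used for the repetition penalties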
    std::vector<llama_token> last_n_tokens(n_ctx);
    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

    if (params.interactive) {
        const char * control_message;
        if (con_st.multiline_input) {
            control_message = " - To return control to LLaMa, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to LLaMa.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        fprintf(stderr, "== Running in interactive mode. ==\n"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
               " - Press Ctrl+C to interject at any time.\n"
#endif
               "%s\n", control_message);

        is_interacting = params.interactive_first;
    }

    bool is_antiprompt = false;
    bool input_echo    = true;
    bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();

    int n_past             = 0;
    int n_remain           = params.n_predict;
    int n_consumed         = 0;
    int n_session_consumed = 0;

    // the first thing we will do is to output the prompt, so set color accordingly
    console_set_color(con_st, CONSOLE_COLOR_PROMPT);

    std::vector<llama_token> embd;

    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
        if (embd.size() > 0) {
            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
            if (n_past + (int) embd.size() > n_ctx) {
                const int n_left = n_past - params.n_keep;
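                // e.g. with n_ctx = 2048 and n_keep = 64: n_left = 1984, so roughly
                // the last n_left/2 = 992 recent tokens are re-fed before continuing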
                // always keep the first token - BOS
                n_past = std::max(1, params.n_keep);

                // insert n_left/2 tokens at the start of embd from last_n_tokens
                embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size());

                // stop saving session if we run out of context
                path_session.clear();

                //printf("\n---\n");
                //printf("resetting: '");
                //for (int i = 0; i < (int) embd.size(); i++) {
                //    printf("%s", llama_token_to_str(ctx, embd[i]));
                //}
                //printf("'\n");
                //printf("\n---\n");
            }

            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        ++i;
                        break;
                    }
                }
                if (i > 0) {
                    // check if we've used up all the prompt but not all cached tokens
                    if (embd.size() == i && n_session_consumed < (int) session_tokens.size()) {
                        // force reevaluation of the last token to recalculate logits
                        i--;
                        n_past--;
                    }
                    embd.erase(embd.begin(), embd.begin() + i);
                }
            }

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }
                if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
                    fprintf(stderr, "%s : failed to eval\n", __func__);
                    return 1;
                }
                n_past += n_eval;
            }
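            // remember what was evaluated so the session file can be updated on save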
            if (embd.size() > 0 && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

        embd.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // out of user input, sample next token
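            // take local copies of the sampling parameters; top_k <= 0 means the
            // full vocabulary and repeat_last_n < 0 means the whole context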
            const float   temp            = params.temp;
            const int32_t top_k           = params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
            const float   top_p           = params.top_p;
            const float   tfs_z           = params.tfs_z;
            const float   typical_p       = params.typical_p;
            const int32_t repeat_last_n   = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
            const float   repeat_penalty  = params.repeat_penalty;
            const float   alpha_presence  = params.presence_penalty;
            const float   alpha_frequency = params.frequency_penalty;
            const int     mirostat        = params.mirostat;
            const float   mirostat_tau    = params.mirostat_tau;
            const float   mirostat_eta    = params.mirostat_eta;
            const bool    penalize_nl     = params.penalize_nl;
            // optionally save the session on first sample (for faster prompt loading next time)
            if (!path_session.empty() && need_to_save_session) {
                need_to_save_session = false;
                llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
            }

            llama_token id = 0;

            {
                auto logits  = llama_get_logits(ctx);
                auto n_vocab = llama_n_vocab(ctx);

                // Apply params.logit_bias map
                for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
                    logits[it->first] += it->second;
                }

                std::vector<llama_token_data> candidates;
                candidates.reserve(n_vocab);
                for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
                    candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
                }

                llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

                // Apply penalties
                float nl_logit = logits[llama_token_nl()];
                auto last_n_repeat = std::min(std::min((int) last_n_tokens.size(), repeat_last_n), n_ctx);
                llama_sample_repetition_penalty(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, repeat_penalty);
                llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, alpha_frequency, alpha_presence);
                if (!penalize_nl) {
                    logits[llama_token_nl()] = nl_logit;
                }

                if (temp <= 0) {
                    // Greedy sampling
                    id = llama_sample_token_greedy(ctx, &candidates_p);
                } else {
                    if (mirostat == 1) {
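                        // mirostat_mu is static so the learned surprise target persists across samples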
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        const int mirostat_m = 100;
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                    } else if (mirostat == 2) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                    } else {
                        // Temperature sampling
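                        // the samplers run in sequence, each pruning the candidate list;
                        // the final argument (min_keep = 1) always leaves at least one candidate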
                        llama_sample_top_k(ctx, &candidates_p, top_k, 1);
                        llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
                        llama_sample_typical(ctx, &candidates_p, typical_p, 1);
                        llama_sample_top_p(ctx, &candidates_p, top_p, 1);
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token(ctx, &candidates_p);
                    }
                }
                // printf("`%d`", candidates_p.size);

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);
            }

            // replace end of text token with newline token when in interactive mode
            if (id == llama_token_eos() && params.interactive && !params.instruct) {
                id = llama_token_newline.front();
                if (params.antiprompt.size() != 0) {
                    // tokenize and inject first reverse prompt
                    const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                    embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                }
            }

            // add it to the context
            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[n_consumed]);
                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (input_echo) {
            for (auto id : embd) {
                printf("%s", llama_token_to_str(ctx, id));
            }
            fflush(stdout);
        }
        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
        }

        // if not currently processing queued inputs
        if ((int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt
            if (params.antiprompt.size()) {
                std::string last_output;
                for (auto id : last_n_tokens) {
                    last_output += llama_token_to_str(ctx, id);
                }

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
                // so we'll compensate for that by widening the search window a bit.
                for (std::string & antiprompt : params.antiprompt) {
                    size_t extra_padding = params.interactive ? 0 : 2;
                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
                        : 0;

                    if (last_output.find(antiprompt.c_str(), search_start_pos) != std::string::npos) {
                        if (params.interactive) {
                            is_interacting = true;
                            console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
                        }
                        is_antiprompt = true;
                        fflush(stdout);
                        break;
                    }
                }
            }

            if (n_past > 0 && is_interacting) {
                if (params.instruct) {
                    printf("\n> ");
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    buffer += params.input_prefix;
                    printf("%s", buffer.c_str());
                }

                std::string line;
                bool another_line = true;
                do {
                    another_line = console_readline(con_st, line);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console_set_color(con_st, CONSOLE_COLOR_DEFAULT);

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // append input suffix if any
                    if (!params.input_suffix.empty()) {
                        buffer += params.input_suffix;
                        printf("%s", params.input_suffix.c_str());
                    }

                    // instruct mode: insert instruction prefix
                    if (params.instruct && !is_antiprompt) {
                        n_consumed = embd_inp.size();
                        embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
                    }

                    auto line_inp = ::llama_tokenize(ctx, buffer, false);
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                    // instruct mode: insert response suffix
                    if (params.instruct) {
                        embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
                    }

                    n_remain -= line_inp.size();
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                is_interacting = false;
            }
        }

        // end of text token
        if (!embd.empty() && embd.back() == llama_token_eos()) {
            if (params.instruct) {
                is_interacting = true;
            } else {
                fprintf(stderr, " [end of text]\n");
                break;
            }
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        if (params.interactive && n_remain <= 0 && params.n_predict != -1) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (!path_session.empty() && params.prompt_cache_all) {
        fprintf(stderr, "\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
        llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
    }

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}