main.cpp

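// Example invocation (model path and flags are illustrative):
//
//   ./main -m models/llama-7B/ggml-model.bin -p "Hello, world" -n 128
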
// Defines sigaction on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#include <signal.h>
#endif
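
// Globals shared with the SIGINT handler: the console state, a pointer to the
// active llama_context (used to print timings on exit), and the flag that
// requests a switch to interactive input.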
static console_state con_st;
static llama_context ** g_ctx;
static bool is_interacting = false;
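
// SIGINT (Ctrl+C) handling: the first interrupt while generating switches to
// interactive input; a second interrupt (while already interacting) prints the
// timings and exits with status 130 (128 + SIGINT).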
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
void sigint_handler(int signo) {
    set_console_color(con_st, CONSOLE_COLOR_DEFAULT);
    printf("\n"); // this also forces a flush of stdout
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            llama_print_timings(*g_ctx);
            _exit(130);
        }
    }
}
#endif
int main(int argc, char ** argv) {
    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    con_st.use_color = params.use_color;

#if defined (_WIN32)
    win32_console_init(params.use_color);
#endif
    if (params.perplexity) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
                " expect poor results\n", __func__, params.n_ctx);
    }
    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    // params.prompt = R"(// this function checks if the number n is prime
    //bool is_prime(int n) {)";
    llama_context * ctx;
    g_ctx = &ctx;

    // load the model and apply lora adapter, if any
    ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }
    // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters
    // uncomment the "used_mem" line in llama.cpp to see the results
    if (params.mem_test) {
        {
            const std::vector<llama_token> tmp(params.n_batch, 0);
            llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
        }

        {
            const std::vector<llama_token> tmp = { 0, };
            llama_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads);
        }

        llama_print_timings(ctx);
        llama_free(ctx);

        return 0;
    }
    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');
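
    // Session ("prompt cache") support: if a session path was given, try to
    // load previously evaluated tokens so a matching prompt prefix can be
    // reused instead of being re-evaluated from scratch.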
    std::string path_session = params.path_session;
    std::vector<llama_token> session_tokens;

    if (!path_session.empty()) {
        fprintf(stderr, "%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());

        // fopen to check for existing session
        FILE * fp = std::fopen(path_session.c_str(), "rb");
        if (fp != NULL) {
            std::fclose(fp);

            session_tokens.resize(params.n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                fprintf(stderr, "%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);

            fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n", __func__, (int) session_tokens.size());
        } else {
            fprintf(stderr, "%s: session file does not exist, will create\n", __func__);
        }
    }
    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    const int n_ctx = llama_n_ctx(ctx);

    if ((int) embd_inp.size() > n_ctx - 4) {
        fprintf(stderr, "%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }
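
    // (the 4-token margin presumably leaves a little headroom in the context
    // for the first generated tokens)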
    // debug message about similarity of saved session, if applicable
    size_t n_matching_session_tokens = 0;
    if (session_tokens.size()) {
        for (llama_token id : session_tokens) {
            if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                break;
            }
            n_matching_session_tokens++;
        }
        if (n_matching_session_tokens >= embd_inp.size()) {
            fprintf(stderr, "%s: session file has exact match for prompt!\n", __func__);
        } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
            fprintf(stderr, "%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        } else {
            fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        }
    }
    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size() || params.instruct) {
        params.n_keep = (int) embd_inp.size();
    }
    // prefix & suffix for instruct mode
    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true);
    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);

    // in instruct mode, we inject a prefix and a suffix to each input by the user
    if (params.instruct) {
        params.interactive_first = true;
        params.antiprompt.push_back("### Instruction:\n\n");
    }

    // enable interactive mode if reverse prompt or interactive start is specified
    if (params.antiprompt.size() != 0 || params.interactive_first) {
        params.interactive = true;
    }
    // determine newline token
    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        if (params.n_keep > 0) {
            fprintf(stderr, "%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]));
            }
            fprintf(stderr, "'\n");
        }
        fprintf(stderr, "\n");
    }
    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = [](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(static_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

        fprintf(stderr, "%s: interactive mode on.\n", __func__);

        if (params.antiprompt.size()) {
            for (auto antiprompt : params.antiprompt) {
                fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
            }
        }

        if (!params.input_prefix.empty()) {
            fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str());
        }
    }
    fprintf(stderr, "sampling: repeat_last_n = %d, repeat_penalty = %f, presence_penalty = %f, frequency_penalty = %f, top_k = %d, tfs_z = %f, top_p = %f, typical_p = %f, temp = %f, mirostat = %d, mirostat_lr = %f, mirostat_ent = %f\n",
            params.repeat_last_n, params.repeat_penalty, params.presence_penalty, params.frequency_penalty, params.top_k, params.tfs_z, params.top_p, params.typical_p, params.temp, params.mirostat, params.mirostat_eta, params.mirostat_tau);
    fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
    fprintf(stderr, "\n\n");
    // TODO: replace with ring-buffer
    std::vector<llama_token> last_n_tokens(n_ctx);
    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

    if (params.interactive) {
        fprintf(stderr, "== Running in interactive mode. ==\n"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
               " - Press Ctrl+C to interject at any time.\n"
#endif
               " - Press Return to return control to LLaMa.\n"
               " - If you want to submit another line, end your input in '\\'.\n\n");
        is_interacting = params.interactive_first;
    }
    bool is_antiprompt = false;
    bool input_echo    = true;

    // HACK - because session saving incurs a non-negligible delay, for now skip re-saving session
    // if we loaded a session with at least 75% similarity. It's currently just used to speed up the
    // initial prompt so it doesn't need to be an exact match.
    bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < (embd_inp.size() * 3 / 4);

    int n_past             = 0;
    int n_remain           = params.n_predict;
    int n_consumed         = 0;
    int n_session_consumed = 0;

    // the first thing we will do is to output the prompt, so set color accordingly
    set_console_color(con_st, CONSOLE_COLOR_PROMPT);

    std::vector<llama_token> embd;
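
    // Main generation loop: first evaluate any pending tokens in embd, then
    // either sample the next token (once the prompt is fully consumed) or
    // forward remaining prompt/user-input tokens into the next batch.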
    while (n_remain != 0 || params.interactive) {
        // predict
        if (embd.size() > 0) {
            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
            if (n_past + (int) embd.size() > n_ctx) {
                const int n_left = n_past - params.n_keep;

                n_past = params.n_keep;

                // insert n_left/2 tokens at the start of embd from last_n_tokens
                embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size());

                // stop saving session if we run out of context
                path_session = "";

                //printf("\n---\n");
                //printf("resetting: '");
                //for (int i = 0; i < (int) embd.size(); i++) {
                //    printf("%s", llama_token_to_str(ctx, embd[i]));
                //}
                //printf("'\n");
                //printf("\n---\n");
            }
            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            // REVIEW
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        ++i;
                        break;
                    }
                }
                if (i > 0) {
                    embd.erase(embd.begin(), embd.begin() + i);
                }
            }
            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }
                if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
                    fprintf(stderr, "%s : failed to eval\n", __func__);
                    return 1;
                }
                n_past += n_eval;
            }

            if (embd.size() > 0 && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

        embd.clear();
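
        // Sampling path: apply the logit bias map, then the repetition and
        // frequency/presence penalties, and finally pick the next token via
        // greedy sampling (temp <= 0), mirostat v1/v2, or the standard
        // top-k / tail-free / typical / top-p / temperature chain.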
        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // out of user input, sample next token
            const float   temp            = params.temp;
            const int32_t top_k           = params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
            const float   top_p           = params.top_p;
            const float   tfs_z           = params.tfs_z;
            const float   typical_p       = params.typical_p;
            const int32_t repeat_last_n   = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
            const float   repeat_penalty  = params.repeat_penalty;
            const float   alpha_presence  = params.presence_penalty;
            const float   alpha_frequency = params.frequency_penalty;
            const int     mirostat        = params.mirostat;
            const float   mirostat_tau    = params.mirostat_tau;
            const float   mirostat_eta    = params.mirostat_eta;
            const bool    penalize_nl     = params.penalize_nl;

            // optionally save the session on first sample (for faster prompt loading next time)
            if (!path_session.empty() && need_to_save_session) {
                need_to_save_session = false;
                llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
            }
            llama_token id = 0;

            {
                auto logits  = llama_get_logits(ctx);
                auto n_vocab = llama_n_vocab(ctx);

                // Apply params.logit_bias map
                for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
                    logits[it->first] += it->second;
                }

                std::vector<llama_token_data> candidates;
                candidates.reserve(n_vocab);
                for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
                    candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
                }

                llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

                // Apply penalties
                float nl_logit = logits[llama_token_nl()];
                auto last_n_repeat = std::min(std::min((int) last_n_tokens.size(), repeat_last_n), n_ctx);
                llama_sample_repetition_penalty(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, repeat_penalty);
                llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, alpha_frequency, alpha_presence);
                if (!penalize_nl) {
                    logits[llama_token_nl()] = nl_logit;
                }

                if (temp <= 0) {
                    // Greedy sampling
                    id = llama_sample_token_greedy(ctx, &candidates_p);
                } else {
                    if (mirostat == 1) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        const int mirostat_m = 100;
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                    } else if (mirostat == 2) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                    } else {
                        // Temperature sampling
                        llama_sample_top_k(ctx, &candidates_p, top_k);
                        llama_sample_tail_free(ctx, &candidates_p, tfs_z);
                        llama_sample_typical(ctx, &candidates_p, typical_p);
                        llama_sample_top_p(ctx, &candidates_p, top_p);
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token(ctx, &candidates_p);
                    }
                }
                // printf("`%d`", candidates_p.size);

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);
            }
            // replace end of text token with newline token when in interactive mode
            if (id == llama_token_eos() && params.interactive && !params.instruct) {
                id = llama_token_newline.front();
                if (params.antiprompt.size() != 0) {
                    // tokenize and inject first reverse prompt
                    const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                    embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                }
            }

            // add it to the context
            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[n_consumed]);
                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }
        // display text
        if (input_echo) {
            for (auto id : embd) {
                printf("%s", llama_token_to_str(ctx, id));
            }
            fflush(stdout);
        }

        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            set_console_color(con_st, CONSOLE_COLOR_DEFAULT);
        }
        // in interactive mode, and not currently processing queued inputs;
        // check if we should prompt the user for more
        if (params.interactive && (int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt
            if (params.antiprompt.size()) {
                std::string last_output;
                for (auto id : last_n_tokens) {
                    last_output += llama_token_to_str(ctx, id);
                }

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
                for (std::string & antiprompt : params.antiprompt) {
                    if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) {
                        is_interacting = true;
                        is_antiprompt = true;
                        set_console_color(con_st, CONSOLE_COLOR_USER_INPUT);
                        fflush(stdout);
                        break;
                    }
                }
            }
            if (n_past > 0 && is_interacting) {
                // potentially set color to indicate we are taking user input
                set_console_color(con_st, CONSOLE_COLOR_USER_INPUT);

                if (params.instruct) {
                    printf("\n> ");
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    buffer += params.input_prefix;
                    printf("%s", buffer.c_str());
                }

                std::string line;
                bool another_line = true;
                do {
#if defined(_WIN32)
                    std::wstring wline;
                    if (!std::getline(std::wcin, wline)) {
                        // input stream is bad or EOF received
                        return 0;
                    }
                    win32_utf8_encode(wline, line);
#else
                    if (!std::getline(std::cin, line)) {
                        // input stream is bad or EOF received
                        return 0;
                    }
#endif
                    if (line.empty() || line.back() != '\\') {
                        another_line = false;
                    } else {
                        line.pop_back(); // Remove the continuation character
                    }
                    buffer += line + '\n'; // Append the line to the result
                } while (another_line);
                // done taking input, reset color
                set_console_color(con_st, CONSOLE_COLOR_DEFAULT);

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // instruct mode: insert instruction prefix
                    if (params.instruct && !is_antiprompt) {
                        n_consumed = embd_inp.size();
                        embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
                    }

                    auto line_inp = ::llama_tokenize(ctx, buffer, false);
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                    // instruct mode: insert response suffix
                    if (params.instruct) {
                        embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
                    }

                    n_remain -= line_inp.size();
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                is_interacting = false;
            }
        }
        // end of text token
        if (!embd.empty() && embd.back() == llama_token_eos()) {
            if (params.instruct) {
                is_interacting = true;
            } else {
                fprintf(stderr, " [end of text]\n");
                break;
            }
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        if (params.interactive && n_remain <= 0 && params.n_predict != -1) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }
    llama_print_timings(ctx);
    llama_free(ctx);

    set_console_color(con_st, CONSOLE_COLOR_DEFAULT);

    return 0;
}