// main.cpp

#include "common.h"
#include "console.h"
#include "llama.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <random>   // std::mt19937 used for the seed RNG below
#include <sstream>
#include <string>
#include <tuple>    // std::tie used when unpacking llama_init_from_gpt_params
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

static llama_context ** g_ctx;
static llama_model ** g_model;
static gpt_params * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream * g_output_ss;
static std::vector<llama_token> * g_output_tokens;

static bool is_interacting = false;

static bool file_exists(const std::string & path) {
    std::ifstream f(path.c_str());
    return f.good();
}

static bool file_is_empty(const std::string & path) {
    std::ifstream f;
    f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
    f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
    return f.tellg() == 0;
}

static void write_logfile(
    const llama_context * ctx, const gpt_params & params, const llama_model * model,
    const std::vector<llama_token> & input_tokens, const std::string & output,
    const std::vector<llama_token> & output_tokens
) {
    if (params.logdir.empty()) {
        return;
    }

    const std::string timestamp = string_get_sortable_timestamp();

    const bool success = fs_create_directory_with_parents(params.logdir);
    if (!success) {
        fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
            __func__, params.logdir.c_str());
        return;
    }

    const std::string logfile_path = params.logdir + timestamp + ".yml";
    FILE * logfile = fopen(logfile_path.c_str(), "w");

    if (logfile == NULL) {
        fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
        return;
    }

    fprintf(logfile, "binary: main\n");
    char model_desc[128];
    llama_model_desc(model, model_desc, sizeof(model_desc));
    yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc);

    fprintf(logfile, "\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "# Generation Results #\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "\n");

    yaml_dump_string_multiline(logfile, "output", output.c_str());
    yaml_dump_vector_int(logfile, "output_tokens", output_tokens);

    llama_dump_timing_info_yaml(logfile, ctx);
    fclose(logfile);
}
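
// Ctrl+C behaviour: in interactive mode the first SIGINT only sets is_interacting so the main loop
// returns control to the user; a second SIGINT (or any SIGINT outside interactive mode) cleans up the
// console, prints timings, writes the logfile and exits with status 130 (128 + SIGINT).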
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (!is_interacting && g_params->interactive) {
            is_interacting = true;
        } else {
            console::cleanup();
            printf("\n");
            llama_print_timings(*g_ctx);
            write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
            _exit(130);
        }
    }
}
#endif

static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    LOG_TEE("%s", text);
}

int main(int argc, char ** argv) {
    gpt_params params;
    g_params = &params;

    if (!gpt_params_parse(argc, argv, params)) {
        gpt_params_print_usage(argc, argv, params);
        return 1;
    }

    llama_sampling_params & sparams = params.sparams;

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("main", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
    llama_log_set(llama_log_callback_logTee, nullptr);
#endif // LOG_DISABLE_LOGS

    // TODO: Dump params ?
    //LOG("Params perplexity: %s\n", LOG_TOSTR(params.perplexity));

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    console::init(params.simple_io, params.use_color);
    atexit([]() { console::cleanup(); });

    if (params.logits_all) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.n_ctx != 0 && params.n_ctx < 8) {
        LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
        params.n_ctx = 8;
    }

    if (params.rope_freq_base != 0.0) {
        LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
    }

    if (params.rope_freq_scale != 0.0) {
        LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
    }

    LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
    LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    LOG_TEE("%s: seed = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);

    LOG("%s: llama backend init\n", __func__);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model;
    llama_context * ctx;
    llama_context * ctx_guidance = NULL;
    g_model = &model;
    g_ctx = &ctx;

    // load the model and apply lora adapter, if any
    LOG("%s: load the model and apply lora adapter, if any\n", __func__);
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (sparams.cfg_scale > 1.f) {
        struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
        ctx_guidance = llama_new_context_with_model(model, lparams);
    }

    if (model == NULL) {
        LOG_TEE("%s: error: unable to load model\n", __func__);
        return 1;
    }

    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx       = llama_n_ctx(ctx);
    LOG("n_ctx: %d\n", n_ctx);

    if (n_ctx > n_ctx_train) {
        LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
            __func__, n_ctx_train, n_ctx);
    }

    // print system information
    {
        LOG_TEE("\n");
        LOG_TEE("%s\n", gpt_params_get_system_info(params).c_str());
    }

    std::string path_session = params.path_prompt_cache;
    std::vector<llama_token> session_tokens;

    if (!path_session.empty()) {
        LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
        if (!file_exists(path_session)) {
            LOG_TEE("%s: session file does not exist, will create.\n", __func__);
        } else if (file_is_empty(path_session)) {
            LOG_TEE("%s: The session file is empty. A new session will be initialized.\n", __func__);
        } else {
            // The file exists and is not empty
            session_tokens.resize(n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_state_load_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);
            LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
        }
    }

    const bool add_bos = llama_should_add_bos_token(model);
    GGML_ASSERT(llama_add_eos_token(model) != 1);
    LOG("add_bos: %d\n", add_bos);

    std::vector<llama_token> embd_inp;

    if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
        LOG("tokenize the prompt\n");
        embd_inp = ::llama_tokenize(ctx, params.prompt, true, true);
    } else {
        LOG("use session tokens\n");
        embd_inp = session_tokens;
    }

    LOG("prompt: \"%s\"\n", log_tostr(params.prompt));
    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());

    // Should not run without any tokens
    if (embd_inp.empty()) {
        embd_inp.push_back(llama_token_bos(model));
        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
    }

    // Tokenize negative prompt
    std::vector<llama_token> guidance_inp;
    int guidance_offset = 0;
    int original_prompt_len = 0;
    if (ctx_guidance) {
        LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));

        guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, true, true);
        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());

        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true, true);
        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());

        original_prompt_len = original_inp.size();
        guidance_offset = (int)guidance_inp.size() - original_prompt_len;
        LOG("original_prompt_len: %s", log_tostr(original_prompt_len));
        LOG("guidance_offset: %s", log_tostr(guidance_offset));
    }

    if ((int) embd_inp.size() > n_ctx - 4) {
        LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // debug message about similarity of saved session, if applicable
    size_t n_matching_session_tokens = 0;
    if (!session_tokens.empty()) {
        for (llama_token id : session_tokens) {
            if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                break;
            }
            n_matching_session_tokens++;
        }
        if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
            LOG_TEE("%s: using full prompt from session file\n", __func__);
        } else if (n_matching_session_tokens >= embd_inp.size()) {
            LOG_TEE("%s: session file has exact match for prompt!\n", __func__);
        } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
            LOG_TEE("%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                __func__, n_matching_session_tokens, embd_inp.size());
        } else {
            LOG_TEE("%s: session file matches %zu / %zu tokens of prompt\n",
                __func__, n_matching_session_tokens, embd_inp.size());
        }

        // remove any "future" tokens that we might have inherited from the previous session
        llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1);
    }

    LOGLN(
        "recalculate the cached logits (check): embd_inp.empty() %s, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu, embd_inp.size() %zu",
        log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());

    // if we will use the cache for the full prompt without reaching the end of the cache, force
    // reevaluation of the last token to recalculate the cached logits
    if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
        LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);

        session_tokens.resize(embd_inp.size() - 1);
    }

    // number of tokens to keep when resetting context
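    // n_keep < 0 (or larger than the prompt) means "keep the whole prompt" when the context
    // overflows; otherwise the BOS token (if any) is counted on top of the requested value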
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
        params.n_keep = (int)embd_inp.size();
    } else {
        params.n_keep += add_bos; // always keep the BOS token
    }

    if (params.conversation) {
        params.interactive_first = true;
    }

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    if (params.verbose_prompt) {
        LOG_TEE("\n");
        LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
        }

        if (ctx_guidance) {
            LOG_TEE("\n");
            LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
            LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
            for (int i = 0; i < (int) guidance_inp.size(); i++) {
                LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
            }
        }

        if (params.n_keep > add_bos) {
            LOG_TEE("%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
            }
            LOG_TEE("'\n");
        }
        LOG_TEE("\n");
    }

    // ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }

    if (params.interactive) {
        LOG_TEE("%s: interactive mode on.\n", __func__);

        if (!params.antiprompt.empty()) {
            for (const auto & antiprompt : params.antiprompt) {
                LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
                if (params.verbose_prompt) {
                    auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
                    for (int i = 0; i < (int) tmp.size(); i++) {
                        LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                    }
                }
            }
        }

        if (params.input_prefix_bos) {
            LOG_TEE("Input prefix with BOS\n");
        }

        if (!params.input_prefix.empty()) {
            LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
            if (params.verbose_prompt) {
                auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }

        if (!params.input_suffix.empty()) {
            LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
            if (params.verbose_prompt) {
                auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }
    }
  357. LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
  358. LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
  359. LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
  360. // group-attention state
  361. // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
  362. int ga_i = 0;
  363. const int ga_n = params.grp_attn_n;
  364. const int ga_w = params.grp_attn_w;
  365. if (ga_n != 1) {
  366. GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
  367. GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
  368. //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
  369. //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
  370. LOG_TEE("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
  371. }
  372. LOG_TEE("\n\n");

    if (params.interactive) {
        const char * control_message;
        if (params.multiline_input) {
            control_message = " - To return control to the AI, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to the AI.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        LOG_TEE("== Running in interactive mode. ==\n");
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
        LOG_TEE(" - Press Ctrl+C to interject at any time.\n");
#endif
        LOG_TEE("%s\n", control_message);

        is_interacting = params.interactive_first;
    }

    bool is_antiprompt        = false;
    bool input_echo           = true;
    bool display              = true;
    bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();

    int n_past             = 0;
    int n_remain           = params.n_predict;
    int n_consumed         = 0;
    int n_session_consumed = 0;
    int n_past_guidance    = 0;

    std::vector<int>   input_tokens;  g_input_tokens  = &input_tokens;
    std::vector<int>   output_tokens; g_output_tokens = &output_tokens;
    std::ostringstream output_ss;     g_output_ss     = &output_ss;

    // the first thing we will do is to output the prompt, so set color accordingly
    console::set_display(console::prompt);
    display = params.display_prompt;

    std::vector<llama_token> embd;
    std::vector<llama_token> embd_guidance;

    // tokenized antiprompts
    std::vector<std::vector<llama_token>> antiprompt_ids;

    antiprompt_ids.reserve(params.antiprompt.size());
    for (const std::string & antiprompt : params.antiprompt) {
        antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
    }
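
    // note: the pre-tokenized antiprompts above are only used for the single-special-token check in
    // the generation loop; string antiprompts are matched against the decoded tail of the output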

    struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
    if (!ctx_sampling) {
        fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
        exit(1);
    }

    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
        if (!embd.empty()) {
            // Note: (n_ctx - 4) here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;

            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if ((int) embd.size() > max_embd_size) {
                const int skipped_tokens = (int) embd.size() - max_embd_size;
                embd.resize(max_embd_size);

                console::set_display(console::error);
                printf("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
                console::set_display(console::reset);
                fflush(stdout);
            }

            if (ga_n == 1) {
                // infinite text generation via context shifting
                // if we run out of context:
                // - take the n_keep first tokens from the original prompt (via n_past)
                // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
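                // e.g. with n_ctx = 512 and n_keep = 32, once n_past approaches 512 we have
                // n_left = n_past - 32 and n_discard = n_left/2: the KV cells for positions
                // [32, 32 + n_discard) are removed and the remaining tail is shifted back by
                // n_discard, so generation continues with roughly half of the context freed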
                if (n_past + (int) embd.size() + std::max<int>(0, guidance_offset) >= n_ctx) {
                    if (params.n_predict == -2) {
                        LOG_TEE("\n\n%s: context full and n_predict == %d => stopping\n", __func__, params.n_predict);
                        break;
                    }

                    const int n_left    = n_past - params.n_keep;
                    const int n_discard = n_left/2;

                    LOG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                        n_past, n_left, n_ctx, params.n_keep, n_discard);

                    llama_kv_cache_seq_rm (ctx, 0, params.n_keep            , params.n_keep + n_discard);
                    llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);

                    n_past -= n_discard;

                    if (ctx_guidance) {
                        n_past_guidance -= n_discard;
                    }

                    LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);

                    LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());

                    LOG("clear session path\n");
                    path_session.clear();
                }
            } else {
                // context extension via Self-Extend
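                // roughly: each pass shifts the tokens after ga_i forward, divides the positions in the
                // next ga_w-wide window by ga_n (grouping ga_n cells onto one position), and shifts the
                // tail back so positions stay contiguous; the net effect is that n_past shrinks by
                // bd = ga_w*(ga_n - 1)/ga_n and ga_i advances by ga_w/ga_n per iteration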
                while (n_past >= ga_i + ga_w) {
                    const int ib = (ga_n*ga_i)/ga_w;
                    const int bd = (ga_w/ga_n)*(ga_n - 1);
                    const int dd = (ga_w/ga_n) - ib*bd - ga_w;

                    LOG("\n");
                    LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i, n_past, ib*bd, ga_i + ib*bd, n_past + ib*bd);
                    LOG("div:   [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
                    LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);

                    llama_kv_cache_seq_add(ctx, 0, ga_i,                n_past,              ib*bd);
                    llama_kv_cache_seq_div(ctx, 0, ga_i + ib*bd,        ga_i + ib*bd + ga_w, ga_n);
                    llama_kv_cache_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd,      dd);

                    n_past -= bd;

                    ga_i += ga_w/ga_n;

                    LOG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", n_past + bd, n_past, ga_i);
                }
            }

            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        ++i;
                        break;
                    }
                }
                if (i > 0) {
                    embd.erase(embd.begin(), embd.begin() + i);
                }
            }

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
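            // classifier-free guidance: when a negative prompt is set, the guidance context first
            // evaluates that prompt (plus any generated continuation past the original prompt) so the
            // sampler can later contrast its logits with the main context's, scaled by cfg_scale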
            if (ctx_guidance) {
                int input_size = 0;
                llama_token * input_buf = NULL;

                if (n_past_guidance < (int) guidance_inp.size()) {
                    // Guidance context should have the same data with these modifications:
                    //
                    // * Replace the initial prompt
                    // * Shift everything by guidance_offset
                    embd_guidance = guidance_inp;
                    if (embd.begin() + original_prompt_len < embd.end()) {
                        embd_guidance.insert(
                            embd_guidance.end(),
                            embd.begin() + original_prompt_len,
                            embd.end()
                        );
                    }

                    input_buf  = embd_guidance.data();
                    input_size = embd_guidance.size();

                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
                } else {
                    input_buf  = embd.data();
                    input_size = embd.size();
                }

                for (int i = 0; i < input_size; i += params.n_batch) {
                    int n_eval = std::min(input_size - i, params.n_batch);
                    if (llama_decode(ctx_guidance, llama_batch_get_one(input_buf + i, n_eval, n_past_guidance, 0))) {
                        LOG_TEE("%s : failed to eval\n", __func__);
                        return 1;
                    }

                    n_past_guidance += n_eval;
                }
            }

            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }

                LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());

                if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
                    LOG_TEE("%s : failed to eval\n", __func__);
                    return 1;
                }

                n_past += n_eval;

                LOG("n_past = %d\n", n_past);
                // Display total tokens alongside total time
                if (params.n_print > 0 && n_past % params.n_print == 0) {
                    LOG_TEE("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
                }
            }

            if (!embd.empty() && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

        embd.clear();
        embd_guidance.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // optionally save the session on first sample (for faster prompt loading next time)
            if (!path_session.empty() && need_to_save_session && !params.prompt_cache_ro) {
                need_to_save_session = false;
                llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());

                LOG("saved session to %s\n", path_session.c_str());
            }

            const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);

            llama_sampling_accept(ctx_sampling, ctx, id, /* apply_grammar= */ true);

            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());

            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;

            LOG("n_remain: %d\n", n_remain);
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);

                // push the prompt in the sampling context in order to apply repetition penalties later
                // for the prompt, we don't apply grammar rules
                llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], /* apply_grammar= */ false);

                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (input_echo && display) {
            for (auto id : embd) {
                const std::string token_str = llama_token_to_piece(ctx, id, params.special);

                // Console/Stream Output
                fprintf(stdout, "%s", token_str.c_str());

                // Record Displayed Tokens To Log
                // Note: Generated tokens are created one by one hence this check
                if (embd.size() > 1) {
                    // Incoming Requested Tokens
                    input_tokens.push_back(id);
                } else {
                    // Outgoing Generated Tokens
                    output_tokens.push_back(id);
                    output_ss << token_str;
                }

                fflush(stdout);
            }
        }

        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console::set_display(console::reset);
            display = true;
        }

        // if not currently processing queued inputs
        if ((int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt in the last n_prev tokens
            if (!params.antiprompt.empty()) {
                const int n_prev = 32;
                const std::string last_output = llama_sampling_prev_str(ctx_sampling, ctx, n_prev);

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
                // so we'll compensate for that by widening the search window a bit.
                for (std::string & antiprompt : params.antiprompt) {
                    size_t extra_padding = params.interactive ? 0 : 2;
                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
                        : 0;

                    if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
                        if (params.interactive) {
                            is_interacting = true;
                        }
                        is_antiprompt = true;
                        break;
                    }
                }

                // check for reverse prompt using special tokens
                llama_token last_token = llama_sampling_last(ctx_sampling);
                for (std::vector<llama_token> ids : antiprompt_ids) {
                    if (ids.size() == 1 && last_token == ids[0]) {
                        if (params.interactive) {
                            is_interacting = true;
                        }
                        is_antiprompt = true;
                        break;
                    }
                }

                if (is_antiprompt) {
                    LOG("found antiprompt: %s\n", last_output.c_str());
                }
            }

            // deal with end of generation tokens in interactive mode
            if (llama_token_is_eog(model, llama_sampling_last(ctx_sampling))) {
                LOG("found an EOG token\n");

                if (params.interactive) {
                    if (!params.antiprompt.empty()) {
                        // tokenize and inject first reverse prompt
                        const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
                        embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                        is_antiprompt = true;
                    }

                    is_interacting = true;
                    printf("\n");
                }
            }

            if (n_past > 0 && is_interacting) {
                LOG("waiting for user input\n");

                if (params.conversation) {
                    printf("\n> ");
                }

                if (params.input_prefix_bos) {
                    LOG("adding input prefix BOS token\n");
                    embd_inp.push_back(llama_token_bos(model));
                }

                std::string buffer;
                if (!params.input_prefix.empty() && !params.conversation) {
                    LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
                    printf("%s", params.input_prefix.c_str());
                }

                // color user input only
                console::set_display(console::user_input);
                display = params.display_prompt;

                std::string line;
                bool another_line = true;
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console::set_display(console::reset);
                display = true;

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // append input suffix if any
                    if (!params.input_suffix.empty() && !params.conversation) {
                        LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
                        printf("%s", params.input_suffix.c_str());
                    }

                    LOG("buffer: '%s'\n", buffer.c_str());

                    const size_t original_size = embd_inp.size();

                    if (params.escape) {
                        string_process_escapes(buffer);
                    }

                    const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
                    const auto line_inp = ::llama_tokenize(ctx, buffer,              false, false);
                    const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());

                    embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
                    embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());

                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
                        const llama_token token = embd_inp[i];
                        output_tokens.push_back(token);
                        output_ss << llama_token_to_piece(ctx, token);
                    }

                    n_remain -= line_inp.size();
                    LOG("n_remain: %d\n", n_remain);
                } else {
                    LOG("empty line, passing control back\n");
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                if (is_interacting) {
                    llama_sampling_reset(ctx_sampling);
                }
                is_interacting = false;
            }
        }

        // end of generation
        if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.interactive)) {
            LOG_TEE(" [end of text]\n");
            break;
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
        LOG_TEE("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
        llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
    }

    llama_print_timings(ctx);
    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);

    if (ctx_guidance) { llama_free(ctx_guidance); }
    llama_free(ctx);
    llama_free_model(model);

    llama_sampling_free(ctx_sampling);
    llama_backend_free();

#ifndef LOG_DISABLE_LOGS
    LOG_TEE("Log end\n");
#endif // LOG_DISABLE_LOGS

    return 0;
}