main.cpp

#include "common.h"
#include "console.h"
#include "llama.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

static llama_context           ** g_ctx;
static llama_model             ** g_model;
static gpt_params               * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream       * g_output_ss;
static std::vector<llama_token> * g_output_tokens;
static bool is_interacting = false;

static bool file_exists(const std::string & path) {
    std::ifstream f(path.c_str());
    return f.good();
}

static bool file_is_empty(const std::string & path) {
    std::ifstream f;
    f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
    f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
    return f.tellg() == 0;
}

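// dump the run parameters, the generated output and timing info to a YAML file in params.logdir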
static void write_logfile(
    const llama_context * ctx, const gpt_params & params, const llama_model * model,
    const std::vector<llama_token> & input_tokens, const std::string & output,
    const std::vector<llama_token> & output_tokens
) {
    if (params.logdir.empty()) {
        return;
    }

    const std::string timestamp = string_get_sortable_timestamp();

    const bool success = fs_create_directory_with_parents(params.logdir);
    if (!success) {
        fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
                __func__, params.logdir.c_str());
        return;
    }

    const std::string logfile_path = params.logdir + timestamp + ".yml";
    FILE * logfile = fopen(logfile_path.c_str(), "w");

    if (logfile == NULL) {
        fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
        return;
    }

    fprintf(logfile, "binary: main\n");
    char model_desc[128];
    llama_model_desc(model, model_desc, sizeof(model_desc));
    yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc);

    fprintf(logfile, "\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "# Generation Results #\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "\n");

    yaml_dump_string_multiline(logfile, "output", output.c_str());
    yaml_dump_vector_int(logfile, "output_tokens", output_tokens);

    llama_dump_timing_info_yaml(logfile, ctx);
    fclose(logfile);
}

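// SIGINT handling: the first Ctrl+C in interactive mode hands control back to the user;
// a second Ctrl+C (or one outside interactive mode) prints timings, writes the logfile and exits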
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (!is_interacting && g_params->interactive) {
            is_interacting = true;
        } else {
            console::cleanup();
            printf("\n");
            llama_print_timings(*g_ctx);
            write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
            _exit(130);
        }
    }
}
#endif

static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    LOG_TEE("%s", text);
}

static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg> & chat_msgs, std::string role, std::string content) {
    llama_chat_msg new_msg{role, content};
    auto formatted = llama_chat_format_single(
        model, g_params->chat_template, chat_msgs, new_msg, role == "user");
    chat_msgs.push_back({role, content});
    return formatted;
}

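// entry point: parse parameters, load the model and run the generation / chat loop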
int main(int argc, char ** argv) {
    gpt_params params;
    g_params = &params;

    if (!gpt_params_parse(argc, argv, params)) {
        gpt_params_print_usage(argc, argv, params);
        return 1;
    }

    llama_sampling_params & sparams = params.sparams;

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("main", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
    llama_log_set(llama_log_callback_logTee, nullptr);
#endif // LOG_DISABLE_LOGS

    // TODO: Dump params ?
    //LOG("Params perplexity: %s\n", LOG_TOSTR(params.perplexity));

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    console::init(params.simple_io, params.use_color);
    atexit([]() { console::cleanup(); });

    if (params.logits_all) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.n_ctx != 0 && params.n_ctx < 8) {
        LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
        params.n_ctx = 8;
    }

    if (params.rope_freq_base != 0.0) {
        LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
    }

    if (params.rope_freq_scale != 0.0) {
        LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
    }

    LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
    LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    LOG_TEE("%s: seed = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);

    LOG("%s: llama backend init\n", __func__);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model;
    llama_context * ctx;
    llama_context * ctx_guidance = NULL;
    std::vector<llama_chat_msg> chat_msgs;

    g_model = &model;
    g_ctx = &ctx;

    // load the model and apply lora adapter, if any
    LOG("%s: load the model and apply lora adapter, if any\n", __func__);
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == NULL) {
        LOG_TEE("%s: error: unable to load model\n", __func__);
        return 1;
    }

    // the optional guidance context is only created once the model has loaded successfully
    if (sparams.cfg_scale > 1.f) {
        struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
        ctx_guidance = llama_new_context_with_model(model, lparams);
    }
    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx = llama_n_ctx(ctx);
    LOG("n_ctx: %d\n", n_ctx);

    if (n_ctx > n_ctx_train) {
        LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
                __func__, n_ctx_train, n_ctx);
    }

    LOG_TEE("%s: chat template example: %s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());

    // print system information
    {
        LOG_TEE("\n");
        LOG_TEE("%s\n", gpt_params_get_system_info(params).c_str());
    }

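    // prompt cache (session): previously evaluated tokens can be restored from
    // params.path_prompt_cache so a matching prompt prefix does not have to be re-evaluated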
    std::string path_session = params.path_prompt_cache;
    std::vector<llama_token> session_tokens;

    if (!path_session.empty()) {
        LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
        if (!file_exists(path_session)) {
            LOG_TEE("%s: session file does not exist, will create.\n", __func__);
        } else if (file_is_empty(path_session)) {
            LOG_TEE("%s: The session file is empty. A new session will be initialized.\n", __func__);
        } else {
            // The file exists and is not empty
            session_tokens.resize(n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_state_load_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);
            LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
        }
    }

    const bool add_bos = llama_should_add_bos_token(model);
    GGML_ASSERT(llama_add_eos_token(model) != 1);
    LOG("add_bos: %d\n", add_bos);

    std::vector<llama_token> embd_inp;

    {
        auto prompt = (params.conversation && params.enable_chat_template)
            ? chat_add_and_format(model, chat_msgs, "system", params.prompt) // format the system prompt in conversation mode
            : params.prompt;
        if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
            LOG("tokenize the prompt\n");
            embd_inp = ::llama_tokenize(ctx, prompt, true, true);
        } else {
            LOG("use session tokens\n");
            embd_inp = session_tokens;
        }

        LOG("prompt: \"%s\"\n", log_tostr(prompt));
        LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
    }

    // Should not run without any tokens
    if (embd_inp.empty()) {
        embd_inp.push_back(llama_token_bos(model));
        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
    }

    // Tokenize negative prompt
    std::vector<llama_token> guidance_inp;
    int guidance_offset = 0;
    int original_prompt_len = 0;
    if (ctx_guidance) {
        LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));

        guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, true, true);
        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());

        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true, true);
        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());

        original_prompt_len = original_inp.size();
        guidance_offset = (int)guidance_inp.size() - original_prompt_len;
        LOG("original_prompt_len: %s", log_tostr(original_prompt_len));
        LOG("guidance_offset: %s", log_tostr(guidance_offset));
    }

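    // leave room in the context for at least a few generated tokens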
    if ((int) embd_inp.size() > n_ctx - 4) {
        LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }
    // debug message about similarity of saved session, if applicable
    size_t n_matching_session_tokens = 0;
    if (!session_tokens.empty()) {
        for (llama_token id : session_tokens) {
            if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                break;
            }
            n_matching_session_tokens++;
        }
        if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
            LOG_TEE("%s: using full prompt from session file\n", __func__);
        } else if (n_matching_session_tokens >= embd_inp.size()) {
            LOG_TEE("%s: session file has exact match for prompt!\n", __func__);
        } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
            LOG_TEE("%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                __func__, n_matching_session_tokens, embd_inp.size());
        } else {
            LOG_TEE("%s: session file matches %zu / %zu tokens of prompt\n",
                __func__, n_matching_session_tokens, embd_inp.size());
        }

        // remove any "future" tokens that we might have inherited from the previous session
        llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1);
    }

    LOGLN(
        "recalculate the cached logits (check): embd_inp.empty() %s, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu, embd_inp.size() %zu",
        log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());

    // if we will use the cache for the full prompt without reaching the end of the cache, force
    // reevaluation of the last token to recalculate the cached logits
    if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
        LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);

        session_tokens.resize(embd_inp.size() - 1);
    }

    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
        params.n_keep = (int)embd_inp.size();
    } else {
        params.n_keep += add_bos; // always keep the BOS token
    }

    if (params.conversation) {
        params.interactive_first = true;
    }

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    if (params.verbose_prompt) {
        LOG_TEE("\n");
        LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
        }

        if (ctx_guidance) {
            LOG_TEE("\n");
            LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
            LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
            for (int i = 0; i < (int) guidance_inp.size(); i++) {
                LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
            }
        }

        if (params.n_keep > add_bos) {
            LOG_TEE("%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
            }
            LOG_TEE("'\n");
        }
        LOG_TEE("\n");
    }

    // ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }

    if (params.interactive) {
        LOG_TEE("%s: interactive mode on.\n", __func__);

        if (!params.antiprompt.empty()) {
            for (const auto & antiprompt : params.antiprompt) {
                LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
                if (params.verbose_prompt) {
                    auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
                    for (int i = 0; i < (int) tmp.size(); i++) {
                        LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                    }
                }
            }
        }

        if (params.input_prefix_bos) {
            LOG_TEE("Input prefix with BOS\n");
        }

        if (!params.input_prefix.empty()) {
            LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
            if (params.verbose_prompt) {
                auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }

        if (!params.input_suffix.empty()) {
            LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
            if (params.verbose_prompt) {
                auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }
    }
    LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
    LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
    LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);

    // group-attention state
    // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
    int ga_i = 0;

    const int ga_n = params.grp_attn_n;
    const int ga_w = params.grp_attn_w;

    if (ga_n != 1) {
        GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
        GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
        //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
        //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
        LOG_TEE("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
    }
    LOG_TEE("\n\n");

    if (params.interactive) {
        const char * control_message;
        if (params.multiline_input) {
            control_message = " - To return control to the AI, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to the AI.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        LOG_TEE("== Running in interactive mode. ==\n");
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
        LOG_TEE(" - Press Ctrl+C to interject at any time.\n");
#endif
        LOG_TEE("%s\n", control_message);

        is_interacting = params.interactive_first;
    }

    bool is_antiprompt = false;
    bool input_echo    = true;
    bool display       = true;
    bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();

    int n_past             = 0;
    int n_remain           = params.n_predict;
    int n_consumed         = 0;
    int n_session_consumed = 0;
    int n_past_guidance    = 0;

    std::vector<int>   input_tokens;  g_input_tokens  = &input_tokens;
    std::vector<int>   output_tokens; g_output_tokens = &output_tokens;
    std::ostringstream output_ss;     g_output_ss     = &output_ss;
    std::ostringstream assistant_ss; // for storing current assistant message, used in conversation mode

    // the first thing we will do is to output the prompt, so set color accordingly
    console::set_display(console::prompt);
    display = params.display_prompt;

    std::vector<llama_token> embd;
    std::vector<llama_token> embd_guidance;

    // tokenized antiprompts
    std::vector<std::vector<llama_token>> antiprompt_ids;

    antiprompt_ids.reserve(params.antiprompt.size());
    for (const std::string & antiprompt : params.antiprompt) {
        antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
    }

    struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
    if (!ctx_sampling) {
        fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
        exit(1);
    }

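    // main loop: evaluate pending tokens (prompt or user input), sample the next token,
    // and hand control back to the user when needed, until the n_remain budget runs out
    // or an end-of-generation token is produced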
    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
        if (!embd.empty()) {
            // Note: (n_ctx - 4) here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;

            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if ((int) embd.size() > max_embd_size) {
                const int skipped_tokens = (int) embd.size() - max_embd_size;
                embd.resize(max_embd_size);

                console::set_display(console::error);
                printf("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
                console::set_display(console::reset);
                fflush(stdout);
            }

            if (ga_n == 1) {
                // infinite text generation via context shifting
                // if we run out of context:
                // - take the n_keep first tokens from the original prompt (via n_past)
                // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
                if (n_past + (int) embd.size() + std::max<int>(0, guidance_offset) >= n_ctx) {
                    if (params.n_predict == -2) {
                        LOG_TEE("\n\n%s: context full and n_predict == %d => stopping\n", __func__, params.n_predict);
                        break;
                    }

                    const int n_left    = n_past - params.n_keep;
                    const int n_discard = n_left/2;

                    LOG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                        n_past, n_left, n_ctx, params.n_keep, n_discard);

                    llama_kv_cache_seq_rm (ctx, 0, params.n_keep, params.n_keep + n_discard);
                    llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);

                    n_past -= n_discard;

                    if (ctx_guidance) {
                        n_past_guidance -= n_discard;
                    }

                    LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);

                    LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());

                    LOG("clear session path\n");
                    path_session.clear();
                }
            } else {
                // context extension via Self-Extend
                while (n_past >= ga_i + ga_w) {
                    const int ib = (ga_n*ga_i)/ga_w;
                    const int bd = (ga_w/ga_n)*(ga_n - 1);
                    const int dd = (ga_w/ga_n) - ib*bd - ga_w;

                    LOG("\n");
                    LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i, n_past, ib*bd, ga_i + ib*bd, n_past + ib*bd);
                    LOG("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
                    LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);

                    llama_kv_cache_seq_add(ctx, 0, ga_i, n_past, ib*bd);
                    llama_kv_cache_seq_div(ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n);
                    llama_kv_cache_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd);

                    n_past -= bd;

                    ga_i += ga_w/ga_n;

                    LOG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", n_past + bd, n_past, ga_i);
                }
            }

            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        ++i;
                        break;
                    }
                }
                if (i > 0) {
                    embd.erase(embd.begin(), embd.begin() + i);
                }
            }

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            if (ctx_guidance) {
                int input_size = 0;
                llama_token * input_buf = NULL;

                if (n_past_guidance < (int) guidance_inp.size()) {
                    // Guidance context should have the same data with these modifications:
                    //
                    // * Replace the initial prompt
                    // * Shift everything by guidance_offset
                    embd_guidance = guidance_inp;
                    if (embd.begin() + original_prompt_len < embd.end()) {
                        embd_guidance.insert(
                            embd_guidance.end(),
                            embd.begin() + original_prompt_len,
                            embd.end()
                        );
                    }

                    input_buf  = embd_guidance.data();
                    input_size = embd_guidance.size();

                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
                } else {
                    input_buf  = embd.data();
                    input_size = embd.size();
                }

                for (int i = 0; i < input_size; i += params.n_batch) {
                    int n_eval = std::min(input_size - i, params.n_batch);
                    if (llama_decode(ctx_guidance, llama_batch_get_one(input_buf + i, n_eval, n_past_guidance, 0))) {
                        LOG_TEE("%s : failed to eval\n", __func__);
                        return 1;
                    }

                    n_past_guidance += n_eval;
                }
            }

            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }

                LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());

                if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
                    LOG_TEE("%s : failed to eval\n", __func__);
                    return 1;
                }

                n_past += n_eval;

                LOG("n_past = %d\n", n_past);
                // Display total tokens alongside total time
                if (params.n_print > 0 && n_past % params.n_print == 0) {
                    LOG_TEE("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
                }
            }

            if (!embd.empty() && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

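        // the evaluated tokens are now in the KV cache, so the staging buffers can be cleared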
        embd.clear();
        embd_guidance.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // optionally save the session on first sample (for faster prompt loading next time)
            if (!path_session.empty() && need_to_save_session && !params.prompt_cache_ro) {
                need_to_save_session = false;
                llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());

                LOG("saved session to %s\n", path_session.c_str());
            }

            const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);

            llama_sampling_accept(ctx_sampling, ctx, id, /* apply_grammar= */ true);

            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());

            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;

            LOG("n_remain: %d\n", n_remain);
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);

                // push the prompt in the sampling context in order to apply repetition penalties later
                // for the prompt, we don't apply grammar rules
                llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], /* apply_grammar= */ false);

                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (input_echo && display) {
            for (auto id : embd) {
                const std::string token_str = llama_token_to_piece(ctx, id, params.special);

                // Console/Stream Output
                fprintf(stdout, "%s", token_str.c_str());

                // Record Displayed Tokens To Log
                // Note: Generated tokens are created one by one hence this check
                if (embd.size() > 1) {
                    // Incoming Requested Tokens
                    input_tokens.push_back(id);
                } else {
                    // Outgoing Generated Tokens
                    output_tokens.push_back(id);
                    output_ss << token_str;
                }

                fflush(stdout);
            }
        }

        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console::set_display(console::reset);
            display = true;
        }

        // if not currently processing queued inputs
        if ((int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt in the last n_prev tokens
            if (!params.antiprompt.empty()) {
                const int n_prev = 32;
                const std::string last_output = llama_sampling_prev_str(ctx_sampling, ctx, n_prev);

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
                // so we'll compensate for that by widening the search window a bit.
                for (std::string & antiprompt : params.antiprompt) {
                    size_t extra_padding = params.interactive ? 0 : 2;
                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
                        : 0;

                    if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
                        if (params.interactive) {
                            is_interacting = true;
                        }
                        is_antiprompt = true;
                        break;
                    }
                }

                // check for reverse prompt using special tokens
                llama_token last_token = llama_sampling_last(ctx_sampling);
                for (std::vector<llama_token> ids : antiprompt_ids) {
                    if (ids.size() == 1 && last_token == ids[0]) {
                        if (params.interactive) {
                            is_interacting = true;
                        }
                        is_antiprompt = true;
                        break;
                    }
                }

                if (is_antiprompt) {
                    LOG("found antiprompt: %s\n", last_output.c_str());
                }
            }

            // deal with end of generation tokens in interactive mode
            if (llama_token_is_eog(model, llama_sampling_last(ctx_sampling))) {
                LOG("found an EOG token\n");

                if (params.interactive) {
                    if (!params.antiprompt.empty()) {
                        // tokenize and inject first reverse prompt
                        const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
                        embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                        is_antiprompt = true;
                    }

                    if (params.enable_chat_template) {
                        chat_add_and_format(model, chat_msgs, "assistant", assistant_ss.str());
                    }
                    is_interacting = true;
                    printf("\n");
                }
            }

            // if current token is not EOG, we add it to current assistant message
            if (params.conversation) {
                auto id = llama_sampling_last(ctx_sampling);
                assistant_ss << llama_token_to_piece(ctx, id, false);
            }

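            // in interactive mode, read the next user input, wrap it with the input
            // prefix/suffix or chat template, and queue the resulting tokens for evaluation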
            if (n_past > 0 && is_interacting) {
                LOG("waiting for user input\n");

                if (params.conversation) {
                    printf("\n> ");
                }

                if (params.input_prefix_bos) {
                    LOG("adding input prefix BOS token\n");
                    embd_inp.push_back(llama_token_bos(model));
                }

                std::string buffer;
                if (!params.input_prefix.empty() && !params.conversation) {
                    LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
                    printf("%s", params.input_prefix.c_str());
                }

                // color user input only
                console::set_display(console::user_input);
                display = params.display_prompt;

                std::string line;
                bool another_line = true;
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console::set_display(console::reset);
                display = true;

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // append input suffix if any
                    if (!params.input_suffix.empty() && !params.conversation) {
                        LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
                        printf("%s", params.input_suffix.c_str());
                    }

                    LOG("buffer: '%s'\n", buffer.c_str());

                    const size_t original_size = embd_inp.size();

                    if (params.escape) {
                        string_process_escapes(buffer);
                    }

                    bool format_chat = params.conversation && params.enable_chat_template;
                    std::string user_inp = format_chat
                        ? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
                        : std::move(buffer);
                    // TODO: one inconvenience of the current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
                    const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
                    const auto line_inp = ::llama_tokenize(ctx, user_inp, false, format_chat);
                    const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());

                    embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
                    embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());

                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
                        const llama_token token = embd_inp[i];
                        output_tokens.push_back(token);
                        output_ss << llama_token_to_piece(ctx, token);
                    }

                    // reset assistant message
                    assistant_ss.str("");

                    n_remain -= line_inp.size();
                    LOG("n_remain: %d\n", n_remain);
                } else {
                    LOG("empty line, passing control back\n");
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                if (is_interacting) {
                    llama_sampling_reset(ctx_sampling);
                }
                is_interacting = false;
            }
        }

        // end of generation
        if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.interactive)) {
            LOG_TEE(" [end of text]\n");
            break;
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
        LOG_TEE("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
        llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
    }

    llama_print_timings(ctx);
    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);

    if (ctx_guidance) { llama_free(ctx_guidance); }
    llama_free(ctx);
    llama_free_model(model);

    llama_sampling_free(ctx_sampling);
    llama_backend_free();

#ifndef LOG_DISABLE_LOGS
    LOG_TEE("Log end\n");
#endif // LOG_DISABLE_LOGS

    return 0;
}
  781. }