// main.cpp

#include "common.h"
#include "llama.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#include <signal.h>
#endif

#if defined (_WIN32)
#pragma comment(lib,"kernel32.lib")
extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle);
extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode);
extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
#endif

#define ANSI_COLOR_RED     "\x1b[31m"
#define ANSI_COLOR_GREEN   "\x1b[32m"
#define ANSI_COLOR_YELLOW  "\x1b[33m"
#define ANSI_COLOR_BLUE    "\x1b[34m"
#define ANSI_COLOR_MAGENTA "\x1b[35m"
#define ANSI_COLOR_CYAN    "\x1b[36m"
#define ANSI_COLOR_RESET   "\x1b[0m"
#define ANSI_BOLD          "\x1b[1m"

/* Keep track of current color of output, and emit ANSI code if it changes. */
enum console_state {
    CONSOLE_STATE_DEFAULT = 0,
    CONSOLE_STATE_PROMPT,
    CONSOLE_STATE_USER_INPUT
};

static console_state con_st = CONSOLE_STATE_DEFAULT;
static bool con_use_color = false;

void enable_console_colors() {
#if defined (_WIN32)
    // Enable ANSI colors on Windows 10+
    unsigned long dwMode = 0;
    void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11)
    if (hConOut && hConOut != (void*)-1 && GetConsoleMode(hConOut, &dwMode) && !(dwMode & 0x4)) {
        SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4)
    }
#endif
}

void set_console_state(console_state new_st) {
    if (!con_use_color) return;
    // only emit color code if state changed
    if (new_st != con_st) {
        con_st = new_st;
        switch (con_st) {
            case CONSOLE_STATE_DEFAULT:
                printf(ANSI_COLOR_RESET);
                return;
            case CONSOLE_STATE_PROMPT:
                printf(ANSI_COLOR_YELLOW);
                return;
            case CONSOLE_STATE_USER_INPUT:
                printf(ANSI_BOLD ANSI_COLOR_GREEN);
                return;
        }
    }
}

static bool is_interacting = false;

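// SIGINT handling: the first Ctrl+C during generation drops into interactive
// input; a second Ctrl+C while already interacting exits with status 130
// (128 + SIGINT, the conventional exit code for death by Ctrl+C).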
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
void sigint_handler(int signo) {
    set_console_state(CONSOLE_STATE_DEFAULT);
    printf("\n"); // this also forces a flush of stdout
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            _exit(130);
        }
    }
}
#endif

int main(int argc, char ** argv) {
    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    if (params.perplexity) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");
        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");
        return 0;
    }
    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }
    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    con_use_color = params.use_color;

    // params.prompt = R"(// this function checks if the number n is prime
    //bool is_prime(int n) {)";

    llama_context * ctx;

    // load the model
    {
        auto lparams = llama_context_default_params();

        lparams.n_ctx     = params.n_ctx;
        lparams.n_parts   = params.n_parts;
        lparams.seed      = params.seed;
        lparams.f16_kv    = params.memory_f16;
        lparams.use_mlock = params.use_mlock;
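
        // note: f16_kv stores the KV cache in half precision, roughly halving
        // its memory footprint; use_mlock asks the OS to pin the model pages
        // in RAM so they are not swapped out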

        ctx = llama_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters
    // uncomment the "used_mem" line in llama.cpp to see the results
    if (params.mem_test) {
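        // exercise the two worst cases: a full batch of n_batch tokens at
        // position 0 (peak prompt-processing memory), then a single token at
        // the last position (peak KV cache usage)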
        {
            const std::vector<llama_token> tmp(params.n_batch, 0);
            llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
        }

        {
            const std::vector<llama_token> tmp = { 0, };
            llama_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads);
        }

        llama_print_timings(ctx);
        llama_free(ctx);

        return 0;
    }

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    const int n_ctx = llama_n_ctx(ctx);

    if ((int) embd_inp.size() > n_ctx - 4) {
        fprintf(stderr, "%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    params.n_keep = std::min(params.n_keep, (int) embd_inp.size());
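
    // The instruct-mode prefix/suffix below wrap each user input in an
    // Alpaca-style template: "### Instruction:" before the input and
    // "### Response:" after it, which is also why "### Instruction:" doubles
    // as the reverse prompt a few lines further down.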

    // prefix & suffix for instruct mode
    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true);
    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);

    // in instruct mode, we inject a prefix and a suffix to each input by the user
    if (params.instruct) {
        params.interactive = true;
        params.antiprompt.push_back("### Instruction:\n\n");
    }

    // enable interactive mode if reverse prompt is specified
    if (params.antiprompt.size() != 0) {
        params.interactive = true;
    }

    if (params.interactive_start) {
        params.interactive = true;
    }

    // determine newline token
    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        if (params.n_keep > 0) {
            fprintf(stderr, "%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]));
            }
            fprintf(stderr, "'\n");
        }
        fprintf(stderr, "\n");
    }

    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        signal(SIGINT, sigint_handler);
#endif

        fprintf(stderr, "%s: interactive mode on.\n", __func__);

        if (params.antiprompt.size()) {
            for (auto antiprompt : params.antiprompt) {
                fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
            }
        }

        if (!params.input_prefix.empty()) {
            fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str());
        }
    }

    fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
    fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
    fprintf(stderr, "\n\n");

    // TODO: replace with ring-buffer
    std::vector<llama_token> last_n_tokens(n_ctx);
    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
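
    // last_n_tokens always holds the most recent n_ctx token ids (zero-padded
    // at startup); it feeds the repetition penalty, the reverse-prompt check,
    // and the context-swap logic in the main loop below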

    if (params.interactive) {
        fprintf(stderr, "== Running in interactive mode. ==\n"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
               " - Press Ctrl+C to interject at any time.\n"
#endif
               " - Press Return to return control to LLaMa.\n"
               " - If you want to submit another line, end your input in '\\'.\n\n");
        is_interacting = params.interactive_start || params.instruct;
    }

    bool input_noecho = false;

    int n_past     = 0;
    int n_remain   = params.n_predict;
    int n_consumed = 0;

    // the first thing we will do is to output the prompt, so set color accordingly
    if (params.use_color) {
        enable_console_colors();
    }
    set_console_state(CONSOLE_STATE_PROMPT);

    std::vector<llama_token> embd;

    while (n_remain != 0 || params.interactive) {
        // predict
        if (embd.size() > 0) {
            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in a batch
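            // Example (hypothetical numbers): with n_ctx = 512 and n_keep = 48,
            // once n_past + embd.size() would exceed 512 we reset n_past to 48
            // and re-feed the (n_past - 48)/2 most recent tokens, so about half
            // of the evicted context is recomputed in a single batch.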
            if (n_past + (int) embd.size() > n_ctx) {
                const int n_left = n_past - params.n_keep;

                n_past = params.n_keep;

                // insert n_left/2 tokens at the start of embd from last_n_tokens
                embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size());

                //printf("\n---\n");
                //printf("resetting: '");
                //for (int i = 0; i < (int) embd.size(); i++) {
                //    printf("%s", llama_token_to_str(ctx, embd[i]));
                //}
                //printf("'\n");
                //printf("\n---\n");
            }

            if (llama_eval(ctx, embd.data(), embd.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }

        n_past += embd.size();
        embd.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // out of user input, sample next token
            const int   top_k          = params.top_k; // top_k is a count, not a probability
            const float top_p          = params.top_p;
            const float temp           = params.temp;
            const float repeat_penalty = params.repeat_penalty;

            llama_token id = 0;

            {
                auto logits = llama_get_logits(ctx);
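
                // note: zeroing the EOS logit only discourages end-of-text;
                // since other logits can be negative, EOS can still be sampled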
                if (params.ignore_eos) {
                    logits[llama_token_eos()] = 0;
                }
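
                // penalize repeats over the last repeat_last_n tokens, then
                // sample via top-k filtering and top-p (nucleus) truncation
                // at the given temperature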
                id = llama_sample_top_p_top_k(ctx,
                        last_n_tokens.data() + n_ctx - params.repeat_last_n,
                        params.repeat_last_n, top_k, top_p, temp, repeat_penalty);

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);
            }

            // replace end of text token with newline token when in interactive mode
            if (id == llama_token_eos() && params.interactive && !params.instruct) {
                id = llama_token_newline.front();
                if (params.antiprompt.size() != 0) {
                    // tokenize and inject first reverse prompt
                    const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                    embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                }
            }

            // add it to the context
            embd.push_back(id);

            // echo this to console
            input_noecho = false;

            // decrement remaining sampling budget
            --n_remain;
        } else {
            // some user input remains from prompt or interaction, forward it to processing
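            // move at most n_batch tokens into embd per iteration, so each
            // llama_eval call stays within the configured batch size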
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[n_consumed]);
                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (!input_noecho) {
            for (auto id : embd) {
                printf("%s", llama_token_to_str(ctx, id));
            }
            fflush(stdout);
        }

        // reset color to default if there is no pending user input
        if (!input_noecho && (int) embd_inp.size() == n_consumed) {
            set_console_state(CONSOLE_STATE_DEFAULT);
        }

        // in interactive mode, and not currently processing queued inputs;
        // check if we should prompt the user for more
        if (params.interactive && (int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt
            std::string last_output;
            for (auto id : last_n_tokens) {
                last_output += llama_token_to_str(ctx, id);
            }

            // Check if each of the reverse prompts appears at the end of the output.
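            // (the three-argument find() starts the search at
            // last_output.length() - antiprompt.length(), so only an exact
            // suffix match can succeed)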
            for (std::string & antiprompt : params.antiprompt) {
                if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) {
                    is_interacting = true;
                    set_console_state(CONSOLE_STATE_USER_INPUT);
                    fflush(stdout);
                    break;
                }
            }

            if (n_past > 0 && is_interacting) {
                // potentially set color to indicate we are taking user input
                set_console_state(CONSOLE_STATE_USER_INPUT);

                if (params.instruct) {
                    n_consumed = embd_inp.size();
                    embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());

                    printf("\n> ");
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    buffer += params.input_prefix;
                    printf("%s", buffer.c_str());
                }

                std::string line;
                bool another_line = true;
                do {
                    std::getline(std::cin, line);
                    if (line.empty() || line.back() != '\\') {
                        another_line = false;
                    } else {
                        line.pop_back(); // Remove the continue character
                    }
                    buffer += line + '\n'; // Append the line to the result
                } while (another_line);

                // done taking input, reset color
                set_console_state(CONSOLE_STATE_DEFAULT);

                auto line_inp = ::llama_tokenize(ctx, buffer, false);
                embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                if (params.instruct) {
                    embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
                }

                n_remain -= line_inp.size();

                input_noecho = true; // do not echo this again
            }

            if (n_past > 0) {
                is_interacting = false;
            }
        }

        // end of text token (embd can be empty right after an interactive turn,
        // so guard before calling back())
        if (!embd.empty() && embd.back() == llama_token_eos()) {
            if (params.instruct) {
                is_interacting = true;
            } else {
                fprintf(stderr, " [end of text]\n");
                break;
            }
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        if (params.interactive && n_remain <= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

#if defined (_WIN32)
    signal(SIGINT, SIG_DFL);
#endif

    llama_print_timings(ctx);
    llama_free(ctx);

    set_console_state(CONSOLE_STATE_DEFAULT);

    return 0;
}