run.cpp

#include "chat.h"
#include "common.h"
#include "llama-cpp.h"
#include "log.h"

#include "linenoise.cpp/linenoise.h"

#define JSON_ASSERT GGML_ASSERT
#include <nlohmann/json.hpp>

#if defined(_WIN32)
#    ifndef NOMINMAX
#        define NOMINMAX
#    endif
#    include <windows.h>
#    include <io.h>
#else
#    include <sys/file.h>
#    include <sys/ioctl.h>
#    include <unistd.h>
#endif

#if defined(LLAMA_USE_CURL)
#    include <curl/curl.h>
#endif

#include <signal.h>

#include <cerrno>      // errno, used with strerror
#include <chrono>      // download progress timing
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>       // time/gmtime in s3_dl
#include <filesystem>
#include <iomanip>     // std::put_time in strftime_fmt
#include <iostream>
#include <limits>      // std::numeric_limits in tokenize_prompt
#include <list>
#include <memory>      // std::unique_ptr in read_user_input
#include <sstream>
#include <string>
#include <vector>
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32)
[[noreturn]] static void sigint_handler(int) {
    printf("\n" LOG_COL_DEFAULT);
    exit(0);  // not ideal, but it's the only way to guarantee exit in all cases
}
#endif
GGML_ATTRIBUTE_FORMAT(1, 2)
static int printe(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    const int ret = vfprintf(stderr, fmt, args);
    va_end(args);

    return ret;
}
static std::string strftime_fmt(const char * fmt, const std::tm & tm) {
    std::ostringstream oss;
    oss << std::put_time(&tm, fmt);

    return oss.str();
}
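
// Command-line options for llama-run: positional model/prompt arguments plus
// flags for context size, GPU layers, threads, temperature and chat templating.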
class Opt {
  public:
    int init(int argc, const char ** argv) {
        ctx_params = llama_context_default_params();
        model_params = llama_model_default_params();
        context_size_default = ctx_params.n_batch;
        n_threads_default = ctx_params.n_threads;
        ngl_default = model_params.n_gpu_layers;
        common_params_sampling sampling;
        temperature_default = sampling.temp;

        if (argc < 2) {
            printe("Error: No arguments provided.\n");
            print_help();
            return 1;
        }

        // Parse arguments
        if (parse(argc, argv)) {
            printe("Error: Failed to parse arguments.\n");
            print_help();
            return 1;
        }

        // If help is requested, show help and exit
        if (help) {
            print_help();
            return 2;
        }

        ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default;
        ctx_params.n_ctx = ctx_params.n_batch;
        ctx_params.n_threads = ctx_params.n_threads_batch = n_threads >= 0 ? n_threads : n_threads_default;
        model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
        temperature = temperature >= 0 ? temperature : temperature_default;

        return 0;  // Success
    }

    llama_context_params ctx_params;
    llama_model_params model_params;
    std::string model_;
    std::string chat_template_file;
    std::string user;
    bool use_jinja = false;
    int context_size = -1, ngl = -1, n_threads = -1;
    float temperature = -1;
    bool verbose = false;

  private:
    int context_size_default = -1, ngl_default = -1, n_threads_default = -1;
    float temperature_default = -1;
    bool help = false;

    bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
        return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atoi(argv[++i]);

        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atof(argv[++i]);

        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, std::string & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = argv[++i];

        return 0;
    }

    int parse_options_with_value(int argc, const char ** argv, int & i, bool & options_parsing) {
        if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) {
            if (handle_option_with_value(argc, argv, i, context_size) == 1) {
                return 1;
            }
        } else if (options_parsing &&
                   (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "-ngl") == 0 || strcmp(argv[i], "--ngl") == 0)) {
            if (handle_option_with_value(argc, argv, i, ngl) == 1) {
                return 1;
            }
        } else if (options_parsing && (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--threads") == 0)) {
            if (handle_option_with_value(argc, argv, i, n_threads) == 1) {
                return 1;
            }
        } else if (options_parsing && strcmp(argv[i], "--temp") == 0) {
            if (handle_option_with_value(argc, argv, i, temperature) == 1) {
                return 1;
            }
        } else if (options_parsing && strcmp(argv[i], "--chat-template-file") == 0) {
            if (handle_option_with_value(argc, argv, i, chat_template_file) == 1) {
                return 1;
            }

            use_jinja = true;
        } else {
            return 2;
        }

        return 0;
    }

    int parse_options(const char ** argv, int & i, bool & options_parsing) {
        if (options_parsing && (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
            verbose = true;
        } else if (options_parsing && strcmp(argv[i], "--jinja") == 0) {
            use_jinja = true;
        } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) {
            help = true;
            return 0;
        } else if (options_parsing && strcmp(argv[i], "--") == 0) {
            options_parsing = false;
        } else {
            return 2;
        }

        return 0;
    }

    int parse_positional_args(const char ** argv, int & i, int & positional_args_i) {
        if (positional_args_i == 0) {
            if (!argv[i][0] || argv[i][0] == '-') {
                return 1;
            }

            ++positional_args_i;
            model_ = argv[i];
        } else if (positional_args_i == 1) {
            ++positional_args_i;
            user = argv[i];
        } else {
            user += " " + std::string(argv[i]);
        }

        return 0;
    }

    int parse(int argc, const char ** argv) {
        bool options_parsing = true;
        for (int i = 1, positional_args_i = 0; i < argc; ++i) {
            int ret = parse_options_with_value(argc, argv, i, options_parsing);
            if (ret == 0) {
                continue;
            } else if (ret == 1) {
                return ret;
            }

            ret = parse_options(argv, i, options_parsing);
            if (ret == 0) {
                continue;
            } else if (ret == 1) {
                return ret;
            }

            if (parse_positional_args(argv, i, positional_args_i)) {
                return 1;
            }
        }

        if (model_.empty()) {
            return 1;
        }

        return 0;
    }

    void print_help() const {
        printf(
            "Description:\n"
            "  Runs an LLM.\n"
            "\n"
            "Usage:\n"
            "  llama-run [options] model [prompt]\n"
            "\n"
            "Options:\n"
            "  -c, --context-size <value>\n"
            "      Context size (default: %d)\n"
            "  --chat-template-file <path>\n"
            "      Path to the file containing the chat template to use with the model.\n"
            "      Only supports jinja templates and implicitly sets the --jinja flag.\n"
            "  --jinja\n"
            "      Use jinja templating for the chat template of the model\n"
            "  -n, -ngl, --ngl <value>\n"
            "      Number of GPU layers (default: %d)\n"
            "  --temp <value>\n"
            "      Temperature (default: %.1f)\n"
            "  -t, --threads <value>\n"
            "      Number of threads to use during generation (default: %d)\n"
            "  -v, --verbose, --log-verbose\n"
            "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
            "  -h, --help\n"
            "      Show help message\n"
            "\n"
            "Commands:\n"
            "  model\n"
            "      Model is a string with an optional prefix of\n"
            "      huggingface:// (hf://), modelscope:// (ms://), ollama://, https:// or file://.\n"
            "      If no protocol is specified and a file exists in the specified\n"
            "      path, file:// is assumed, otherwise if a file does not exist in\n"
            "      the specified path, ollama:// is assumed. Models that are being\n"
            "      pulled are downloaded with .partial extension while being\n"
            "      downloaded and then renamed as the file without the .partial\n"
            "      extension when complete.\n"
            "\n"
            "Examples:\n"
            "  llama-run llama3\n"
            "  llama-run ollama://granite-code\n"
            "  llama-run ollama://smollm:135m\n"
            "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run ms://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "modelscope://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run https://example.com/some-file1.gguf\n"
            "  llama-run some-file2.gguf\n"
            "  llama-run file://some-file3.gguf\n"
            "  llama-run --ngl 999 some-file4.gguf\n"
            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
            context_size_default, ngl_default, temperature_default, n_threads_default);
    }
};
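
// Download progress state shared with the libcurl progress callback.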
struct progress_data {
    size_t file_size = 0;
    std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
    bool printed = false;
};

static int get_terminal_width() {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO csbi;
    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
    struct winsize w;
    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
    return w.ws_col;
#endif
}
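
// RAII wrapper around a FILE * that can also take an exclusive advisory lock
// (flock on POSIX, LockFileEx on Windows); the lock and the handle are
// released in the destructor.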
class File {
  public:
    FILE * file = nullptr;

    FILE * open(const std::string & filename, const char * mode) {
        file = ggml_fopen(filename.c_str(), mode);

        return file;
    }

    int lock() {
        if (file) {
#ifdef _WIN32
            fd = _fileno(file);
            hFile = (HANDLE) _get_osfhandle(fd);
            if (hFile == INVALID_HANDLE_VALUE) {
                fd = -1;

                return 1;
            }

            OVERLAPPED overlapped = {};
            if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD,
                            &overlapped)) {
                fd = -1;

                return 1;
            }
#else
            fd = fileno(file);
            if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
                fd = -1;

                return 1;
            }
#endif
        }

        return 0;
    }

    std::string to_string() {
        fseek(file, 0, SEEK_END);
        const size_t size = ftell(file);
        fseek(file, 0, SEEK_SET);
        std::string out;
        out.resize(size);
        const size_t read_size = fread(&out[0], 1, size, file);
        if (read_size != size) {
            printe("Error reading file: %s\n", strerror(errno));
        }

        return out;
    }

    ~File() {
        if (fd >= 0) {
#ifdef _WIN32
            if (hFile != INVALID_HANDLE_VALUE) {
                OVERLAPPED overlapped = {};
                UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped);
            }
#else
            flock(fd, LOCK_UN);
#endif
        }

        if (file) {
            fclose(file);
        }
    }

  private:
    int fd = -1;
#ifdef _WIN32
    HANDLE hFile = nullptr;
#endif
};
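
// Minimal HTTP(S) downloader built on libcurl: resumes partial downloads,
// sends custom headers, can capture the response into a string and renders a
// terminal progress bar. Only compiled when LLAMA_USE_CURL is defined.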
#ifdef LLAMA_USE_CURL
class HttpClient {
  public:
    int init(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
             const bool progress, std::string * response_str = nullptr) {
        if (std::filesystem::exists(output_file)) {
            return 0;
        }

        std::string output_file_partial;
        if (!output_file.empty()) {
            output_file_partial = output_file + ".partial";
        }

        if (download(url, headers, output_file_partial, progress, response_str)) {
            return 1;
        }

        if (!output_file.empty()) {
            try {
                std::filesystem::rename(output_file_partial, output_file);
            } catch (const std::filesystem::filesystem_error & e) {
                printe("Failed to rename '%s' to '%s': %s\n", output_file_partial.c_str(), output_file.c_str(),
                       e.what());
                return 1;
            }
        }

        return 0;
    }

    ~HttpClient() {
        if (chunk) {
            curl_slist_free_all(chunk);
        }

        if (curl) {
            curl_easy_cleanup(curl);
        }
    }

  private:
    CURL * curl = nullptr;
    struct curl_slist * chunk = nullptr;

    int download(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
                 const bool progress, std::string * response_str = nullptr) {
        curl = curl_easy_init();
        if (!curl) {
            return 1;
        }

        progress_data data;
        File out;
        if (!output_file.empty()) {
            if (!out.open(output_file, "ab")) {
                printe("Failed to open file for writing\n");

                return 1;
            }

            if (out.lock()) {
                printe("Failed to exclusively lock file\n");

                return 1;
            }
        }

        set_write_options(response_str, out);
        data.file_size = set_resume_point(output_file);
        set_progress_options(progress, data);
        set_headers(headers);
        CURLcode res = perform(url);
        if (res != CURLE_OK) {
            printe("Fetching resource '%s' failed: %s\n", url.c_str(), curl_easy_strerror(res));
            return 1;
        }

        return 0;
    }

    void set_write_options(std::string * response_str, const File & out) {
        if (response_str) {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, capture_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str);
        } else {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file);
        }
    }

    size_t set_resume_point(const std::string & output_file) {
        size_t file_size = 0;
        if (std::filesystem::exists(output_file)) {
            file_size = std::filesystem::file_size(output_file);
            curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast<curl_off_t>(file_size));
        }

        return file_size;
    }

    void set_progress_options(bool progress, progress_data & data) {
        if (progress) {
            curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
            curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data);
            curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress);
        }
    }

    void set_headers(const std::vector<std::string> & headers) {
        if (!headers.empty()) {
            if (chunk) {
                curl_slist_free_all(chunk);
                chunk = nullptr;
            }

            for (const auto & header : headers) {
                chunk = curl_slist_append(chunk, header.c_str());
            }

            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
        }
    }

    CURLcode perform(const std::string & url) {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
#    ifdef _WIN32
        curl_easy_setopt(curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#    endif
        return curl_easy_perform(curl);
    }

    static std::string human_readable_time(double seconds) {
        int hrs = static_cast<int>(seconds) / 3600;
        int mins = (static_cast<int>(seconds) % 3600) / 60;
        int secs = static_cast<int>(seconds) % 60;

        if (hrs > 0) {
            return string_format("%dh %02dm %02ds", hrs, mins, secs);
        } else if (mins > 0) {
            return string_format("%dm %02ds", mins, secs);
        } else {
            return string_format("%ds", secs);
        }
    }

    static std::string human_readable_size(curl_off_t size) {
        static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
        const int length = sizeof(suffix) / sizeof(suffix[0]);
        int i = 0;
        double dbl_size = size;
        if (size > 1024) {
            for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                dbl_size = size / 1024.0;
            }
        }

        return string_format("%.2f %s", dbl_size, suffix[i]);
    }

    static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t,
                               curl_off_t) {
        progress_data * data = static_cast<progress_data *>(ptr);
        if (total_to_download <= 0) {
            return 0;
        }

        total_to_download += data->file_size;
        const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
        std::string progress_prefix = generate_progress_prefix(percentage);
        const double speed = calculate_speed(now_downloaded, data->start_time);
        const double estimated_time = (total_to_download - now_downloaded) / speed;
        std::string progress_suffix =
            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, estimated_time);

        int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
        std::string progress_bar;
        generate_progress_bar(progress_bar_width, percentage, progress_bar);

        print_progress(progress_prefix, progress_bar, progress_suffix);
        data->printed = true;

        return 0;
    }

    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
        return (now_downloaded_plus_file_size * 100) / total_to_download;
    }

    static std::string generate_progress_prefix(curl_off_t percentage) {
        return string_format("%3ld%% |", static_cast<long int>(percentage));
    }

    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
        const auto now = std::chrono::steady_clock::now();
        const std::chrono::duration<double> elapsed_seconds = now - start_time;
        return now_downloaded / elapsed_seconds.count();
    }

    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
                                                double speed, double estimated_time) {
        const int width = 10;
        return string_format("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(),
                             width, human_readable_size(total_to_download).c_str(), width,
                             human_readable_size(speed).c_str(), width, human_readable_time(estimated_time).c_str());
    }

    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3;
        if (progress_bar_width < 1) {
            progress_bar_width = 1;
        }

        return progress_bar_width;
    }

    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
                                             std::string & progress_bar) {
        const curl_off_t pos = (percentage * progress_bar_width) / 100;
        for (int i = 0; i < progress_bar_width; ++i) {
            progress_bar.append((i < pos) ? "█" : " ");
        }

        return progress_bar;
    }

    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                               const std::string & progress_suffix) {
        printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
    }

    // Write callback: append received data to the output file
    static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        FILE * out = static_cast<FILE *>(stream);
        return fwrite(ptr, size, nmemb, out);
    }

    // Write callback: capture received data into a string
    static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        std::string * str = static_cast<std::string *>(stream);
        str->append(static_cast<char *>(ptr), size * nmemb);
        return size * nmemb;
    }
};
#endif
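
// Owns the model, sampler and context for a run, and resolves or downloads
// the requested model from its protocol prefix before loading it.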
class LlamaData {
  public:
    llama_model_ptr model;
    llama_sampler_ptr sampler;
    llama_context_ptr context;
    std::vector<llama_chat_message> messages;  // TODO: switch to common_chat_msg
    std::list<std::string> msg_strs;
    std::vector<char> fmtted;

    int init(Opt & opt) {
        model = initialize_model(opt);
        if (!model) {
            return 1;
        }

        context = initialize_context(model, opt);
        if (!context) {
            return 1;
        }

        sampler = initialize_sampler(opt);

        return 0;
    }

  private:
#ifdef LLAMA_USE_CURL
    int download(const std::string & url, const std::string & output_file, const bool progress,
                 const std::vector<std::string> & headers = {}, std::string * response_str = nullptr) {
        HttpClient http;
        if (http.init(url, headers, output_file, progress, response_str)) {
            return 1;
        }

        return 0;
    }
#else
    int download(const std::string &, const std::string &, const bool, const std::vector<std::string> & = {},
                 std::string * = nullptr) {
        printe("%s: llama.cpp built without libcurl, downloading from a URL is not supported\n", __func__);

        return 1;
    }
#endif

    // Helper function to handle model tag extraction and URL construction
    std::pair<std::string, std::string> extract_model_and_tag(std::string & model, const std::string & base_url) {
        std::string model_tag = "latest";
        const size_t colon_pos = model.find(':');
        if (colon_pos != std::string::npos) {
            model_tag = model.substr(colon_pos + 1);
            model = model.substr(0, colon_pos);
        }

        std::string url = base_url + model + "/manifests/" + model_tag;

        return { model, url };
    }

    // Helper function to download and parse the manifest
    int download_and_parse_manifest(const std::string & url, const std::vector<std::string> & headers,
                                    nlohmann::json & manifest) {
        std::string manifest_str;
        int ret = download(url, "", false, headers, &manifest_str);
        if (ret) {
            return ret;
        }

        manifest = nlohmann::json::parse(manifest_str);

        return 0;
    }

    int dl_from_endpoint(std::string & model_endpoint, std::string & model, const std::string & bn) {
        // Find the second occurrence of '/' after the protocol string
        size_t pos = model.find('/');
        pos = model.find('/', pos + 1);
        std::string hfr, hff;
        std::vector<std::string> headers = { "User-Agent: llama-cpp", "Accept: application/json" };
        std::string url;

        if (pos == std::string::npos) {
            auto [model_name, manifest_url] = extract_model_and_tag(model, model_endpoint + "v2/");
            hfr = model_name;

            nlohmann::json manifest;
            int ret = download_and_parse_manifest(manifest_url, headers, manifest);
            if (ret) {
                return ret;
            }

            hff = manifest["ggufFile"]["rfilename"];
        } else {
            hfr = model.substr(0, pos);
            hff = model.substr(pos + 1);
        }

        url = model_endpoint + hfr + "/resolve/main/" + hff;

        return download(url, bn, true, headers);
    }

    int modelscope_dl(std::string & model, const std::string & bn) {
        std::string model_endpoint = "https://modelscope.cn/models/";
        return dl_from_endpoint(model_endpoint, model, bn);
    }

    int huggingface_dl(std::string & model, const std::string & bn) {
        std::string model_endpoint = get_model_endpoint();
        return dl_from_endpoint(model_endpoint, model, bn);
    }

    int ollama_dl(std::string & model, const std::string & bn) {
        const std::vector<std::string> headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" };
        if (model.find('/') == std::string::npos) {
            model = "library/" + model;
        }

        auto [model_name, manifest_url] = extract_model_and_tag(model, "https://registry.ollama.ai/v2/");
        nlohmann::json manifest;
        int ret = download_and_parse_manifest(manifest_url, {}, manifest);
        if (ret) {
            return ret;
        }

        std::string layer;
        for (const auto & l : manifest["layers"]) {
            if (l["mediaType"] == "application/vnd.ollama.image.model") {
                layer = l["digest"];
                break;
            }
        }

        std::string blob_url = "https://registry.ollama.ai/v2/" + model_name + "/blobs/" + layer;

        return download(blob_url, bn, true, headers);
    }

    int github_dl(const std::string & model, const std::string & bn) {
        std::string repository = model;
        std::string branch = "main";
        const size_t at_pos = model.find('@');
        if (at_pos != std::string::npos) {
            repository = model.substr(0, at_pos);
            branch = model.substr(at_pos + 1);
        }

        const std::vector<std::string> repo_parts = string_split(repository, "/");
        if (repo_parts.size() < 3) {
            printe("Invalid GitHub repository format\n");
            return 1;
        }

        const std::string & org = repo_parts[0];
        const std::string & project = repo_parts[1];
        std::string url = "https://raw.githubusercontent.com/" + org + "/" + project + "/" + branch;
        for (size_t i = 2; i < repo_parts.size(); ++i) {
            url += "/" + repo_parts[i];
        }

        return download(url, bn, true);
    }

    int s3_dl(const std::string & model, const std::string & bn) {
        const size_t slash_pos = model.find('/');
        if (slash_pos == std::string::npos) {
            return 1;
        }

        const std::string bucket = model.substr(0, slash_pos);
        const std::string key = model.substr(slash_pos + 1);
        const char * access_key = std::getenv("AWS_ACCESS_KEY_ID");
        const char * secret_key = std::getenv("AWS_SECRET_ACCESS_KEY");
        if (!access_key || !secret_key) {
            printe("AWS credentials not found in environment\n");
            return 1;
        }

        // Generate AWS Signature Version 4 headers
        // (Implementation requires HMAC-SHA256 and date handling)
        // Get current timestamp
        const time_t now = time(nullptr);
        const tm tm = *gmtime(&now);
        const std::string date = strftime_fmt("%Y%m%d", tm);
        const std::string datetime = strftime_fmt("%Y%m%dT%H%M%SZ", tm);
        const std::vector<std::string> headers = {
            "Authorization: AWS4-HMAC-SHA256 Credential=" + std::string(access_key) + "/" + date +
                "/us-east-1/s3/aws4_request",
            "x-amz-content-sha256: UNSIGNED-PAYLOAD", "x-amz-date: " + datetime
        };

        const std::string url = "https://" + bucket + ".s3.amazonaws.com/" + key;

        return download(url, bn, true, headers);
    }

    std::string basename(const std::string & path) {
        const size_t pos = path.find_last_of("/\\");
        if (pos == std::string::npos) {
            return path;
        }

        return path.substr(pos + 1);
    }

    int rm_until_substring(std::string & model_, const std::string & substring) {
        const std::string::size_type pos = model_.find(substring);
        if (pos == std::string::npos) {
            return 1;
        }

        model_ = model_.substr(pos + substring.size());  // Skip past the substring
        return 0;
    }

    int resolve_model(std::string & model_) {
        int ret = 0;
        if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) {
            rm_until_substring(model_, "://");

            return ret;
        }

        const std::string bn = basename(model_);
        if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://") ||
            string_starts_with(model_, "hf.co/")) {
            rm_until_substring(model_, "hf.co/");
            rm_until_substring(model_, "://");
            ret = huggingface_dl(model_, bn);
        } else if (string_starts_with(model_, "ms://") || string_starts_with(model_, "modelscope://")) {
            rm_until_substring(model_, "://");
            ret = modelscope_dl(model_, bn);
        } else if ((string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) &&
                   !string_starts_with(model_, "https://ollama.com/library/")) {
            ret = download(model_, bn, true);
        } else if (string_starts_with(model_, "github:") || string_starts_with(model_, "github://")) {
            rm_until_substring(model_, "github:");
            rm_until_substring(model_, "://");
            ret = github_dl(model_, bn);
        } else if (string_starts_with(model_, "s3://")) {
            rm_until_substring(model_, "://");
            ret = s3_dl(model_, bn);
        } else {  // ollama:// or nothing
            rm_until_substring(model_, "ollama.com/library/");
            rm_until_substring(model_, "://");
            ret = ollama_dl(model_, bn);
        }

        model_ = bn;

        return ret;
    }

    // Initializes the model and returns a unique pointer to it
    llama_model_ptr initialize_model(Opt & opt) {
        ggml_backend_load_all();
        resolve_model(opt.model_);
        printe("\r" LOG_CLR_TO_EOL "Loading model");
        llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
        if (!model) {
            printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
        }

        printe("\r" LOG_CLR_TO_EOL);
        return model;
    }

    // Initializes the context with the specified parameters
    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
        llama_context_ptr context(llama_init_from_model(model.get(), opt.ctx_params));
        if (!context) {
            printe("%s: error: failed to create the llama_context\n", __func__);
        }

        return context;
    }

    // Initializes and configures the sampler
    llama_sampler_ptr initialize_sampler(const Opt & opt) {
        llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

        return sampler;
    }
};
// Add a message to `messages` and store its content in `msg_strs`
// (takes `text` by value so the move below actually moves)
static void add_message(const char * role, std::string text, LlamaData & llama_data) {
    llama_data.msg_strs.push_back(std::move(text));
    llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() });
}
// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(const struct common_chat_templates * tmpls, LlamaData & llama_data, const bool append,
                               bool use_jinja) {
    common_chat_templates_inputs inputs;
    for (const auto & msg : llama_data.messages) {
        common_chat_msg cmsg;
        cmsg.role = msg.role;
        cmsg.content = msg.content;
        inputs.messages.push_back(cmsg);
    }
    inputs.add_generation_prompt = append;
    inputs.use_jinja = use_jinja;

    auto chat_params = common_chat_templates_apply(tmpls, inputs);
    // TODO: use other params for tool calls.
    auto result = chat_params.prompt;
    llama_data.fmtted.resize(result.size() + 1);
    memcpy(llama_data.fmtted.data(), result.c_str(), result.size() + 1);
    return result.size();
}
// Function to tokenize the prompt
static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                           std::vector<llama_token> & prompt_tokens, const LlamaData & llama_data) {
    const bool is_first = llama_memory_seq_pos_max(llama_get_memory(llama_data.context.get()), 0) == -1;

    int n_tokens = prompt.size() + 2 * is_first;
    prompt_tokens.resize(n_tokens);
    n_tokens = llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                              prompt_tokens.data(), prompt_tokens.size(),
                              is_first, /*parse_special =*/true);
    if (n_tokens == std::numeric_limits<int32_t>::min()) {
        printe("tokenization failed: input too large\n");
        return -1;
    }
    if (n_tokens < 0) {
        prompt_tokens.resize(-n_tokens);
        int check = llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                                   prompt_tokens.data(), prompt_tokens.size(),
                                   is_first, /*parse_special =*/true);
        if (check != -n_tokens) {
            printe("failed to tokenize the prompt (size mismatch)\n");
            return -1;
        }
        n_tokens = check;
    } else {
        prompt_tokens.resize(n_tokens);
    }
    return n_tokens;
}
// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
    const int n_ctx = llama_n_ctx(ctx.get());
    const int n_ctx_used = llama_memory_seq_pos_max(llama_get_memory(ctx.get()), 0);
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        printf(LOG_COL_DEFAULT "\n");
        printe("context size exceeded\n");
        return 1;
    }

    return 0;
}
// convert the token to a string
static int convert_token_to_string(const llama_vocab * vocab, const llama_token token_id, std::string & piece) {
    char buf[256];
    int n = llama_token_to_piece(vocab, token_id, buf, sizeof(buf), 0, true);
    if (n < 0) {
        printe("failed to convert token to piece\n");
        return 1;
    }

    piece = std::string(buf, n);
    return 0;
}

static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) {
    printf("%s", piece.c_str());
    fflush(stdout);
    response += piece;
}
// helper function to evaluate a prompt and generate a response
static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
    const llama_vocab * vocab = llama_model_get_vocab(llama_data.model.get());

    std::vector<llama_token> tokens;
    if (tokenize_prompt(vocab, prompt, tokens, llama_data) < 0) {
        return 1;
    }

    // prepare a batch for the prompt
    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
    llama_token new_token_id;
    while (true) {
        if (check_context_size(llama_data.context, batch)) {
            return 1;
        }

        if (llama_decode(llama_data.context.get(), batch)) {
            printe("failed to decode\n");
            return 1;
        }

        // sample the next token and stop on an end-of-generation token
        new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1);
        if (llama_vocab_is_eog(vocab, new_token_id)) {
            break;
        }

        std::string piece;
        if (convert_token_to_string(vocab, new_token_id, piece)) {
            return 1;
        }

        print_word_and_concatenate_to_response(piece, response);

        // prepare the next batch with the sampled token
        batch = llama_batch_get_one(&new_token_id, 1);
    }

    printf(LOG_COL_DEFAULT);
    return 0;
}
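
// Read one line of user input: linenoise (with history) on POSIX, std::getline
// on Windows. Returns 0 on input, 1 to end the session, 2 to prompt again.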
static int read_user_input(std::string & user_input) {
    static const char * prompt_prefix_env = std::getenv("LLAMA_PROMPT_PREFIX");
    static const char * prompt_prefix = prompt_prefix_env ? prompt_prefix_env : "> ";
#ifdef _WIN32
    printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);

    std::getline(std::cin, user_input);
    if (std::cin.eof()) {
        printf("\n");
        return 1;
    }
#else
    std::unique_ptr<char, decltype(&std::free)> line(const_cast<char *>(linenoise(prompt_prefix)), free);
    if (!line) {
        return 1;
    }

    user_input = line.get();
#endif

    if (user_input == "/bye") {
        return 1;
    }

    if (user_input.empty()) {
        return 2;
    }

#ifndef _WIN32
    linenoiseHistoryAdd(line.get());
#endif

    return 0;  // Should have data in happy path
}
// Function to generate a response based on the prompt
static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response,
                             const bool stdout_a_terminal) {
    // Set response color
    if (stdout_a_terminal) {
        printf(LOG_COL_YELLOW);
    }

    if (generate(llama_data, prompt, response)) {
        printe("failed to generate response\n");
        return 1;
    }

    // End response with color reset and newline
    printf("\n%s", stdout_a_terminal ? LOG_COL_DEFAULT : "");
    return 0;
}
// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(const common_chat_templates * tmpls, LlamaData & llama_data,
                                                   const bool append, int & output_length, bool use_jinja) {
    const int new_len = apply_chat_template(tmpls, llama_data, append, use_jinja);
    if (new_len < 0) {
        printe("failed to apply the chat template\n");
        return -1;
    }

    output_length = new_len;
    return 0;
}
// Helper function to handle user input: use the preset prompt if one was
// given, otherwise read interactively
static int handle_user_input(std::string & user_input, const std::string & user) {
    if (!user.empty()) {
        user_input = user;
        return 0;  // No need for interactive input
    }

    return read_user_input(user_input);  // Returns 1 when the input ends the loop
}
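
// Detect whether stdin/stdout are attached to a terminal, so interactive
// prompts and colored output are only used when it makes sense.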
static bool is_stdin_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdin, &mode);
#else
    return isatty(STDIN_FILENO);
#endif
}

static bool is_stdout_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdout, &mode);
#else
    return isatty(STDOUT_FILENO);
#endif
}
// Loop until we get usable user input (ret == 2 means "empty, ask again")
static int get_user_input(std::string & user_input, const std::string & user) {
    while (true) {
        const int ret = handle_user_input(user_input, user);
        if (ret == 1) {
            return 1;
        }

        if (ret == 2) {
            continue;
        }

        break;
    }

    return 0;
}
// Reads a chat template file to be used
static std::string read_chat_template_file(const std::string & chat_template_file) {
    File file;
    if (!file.open(chat_template_file, "r")) {
        printe("Error opening chat template file '%s': %s\n", chat_template_file.c_str(), strerror(errno));
        return "";
    }

    return file.to_string();
}
static int process_user_message(const Opt & opt, const std::string & user_input, LlamaData & llama_data,
                                const common_chat_templates_ptr & chat_templates, int & prev_len,
                                const bool stdout_a_terminal) {
    add_message("user", opt.user.empty() ? user_input : opt.user, llama_data);
    int new_len;
    if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, true, new_len, opt.use_jinja) < 0) {
        return 1;
    }

    std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len);
    std::string response;
    if (generate_response(llama_data, prompt, response, stdout_a_terminal)) {
        return 1;
    }

    if (!opt.user.empty()) {
        return 2;
    }

    add_message("assistant", response, llama_data);
    if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, false, prev_len, opt.use_jinja) < 0) {
        return 1;
    }

    return 0;
}
// Main chat loop function
static int chat_loop(LlamaData & llama_data, const Opt & opt) {
    int prev_len = 0;
    llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
    std::string chat_template;
    if (!opt.chat_template_file.empty()) {
        chat_template = read_chat_template_file(opt.chat_template_file);
    }

    common_chat_templates_ptr chat_templates = common_chat_templates_init(llama_data.model.get(), chat_template);
    static const bool stdout_a_terminal = is_stdout_a_terminal();
    while (true) {
        // Get user input
        std::string user_input;
        if (get_user_input(user_input, opt.user) == 1) {
            return 0;
        }

        const int ret = process_user_message(opt, user_input, llama_data, chat_templates, prev_len, stdout_a_terminal);
        if (ret == 1) {
            return 1;
        } else if (ret == 2) {
            break;
        }
    }

    return 0;
}
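
// Forward ggml/llama log messages to stderr; errors are always shown,
// everything else only with --verbose.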
static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
    const Opt * opt = static_cast<Opt *>(p);
    if (opt->verbose || level == GGML_LOG_LEVEL_ERROR) {
        printe("%s", text);
    }
}
static std::string read_pipe_data() {
    std::ostringstream result;
    result << std::cin.rdbuf();  // Read all data from std::cin

    return result.str();
}
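
// Install a Ctrl+C handler so the terminal color is reset before exiting.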
static void ctrl_c_handling() {
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = sigint_handler;
    sigemptyset(&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined(_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
}
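
// Entry point: parse options, append any piped stdin to the prompt, load the
// model and run the chat loop.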
int main(int argc, const char ** argv) {
    ctrl_c_handling();
    Opt opt;
    const int ret = opt.init(argc, argv);
    if (ret == 2) {
        return 0;
    } else if (ret) {
        return 1;
    }

    if (!is_stdin_a_terminal()) {
        if (!opt.user.empty()) {
            opt.user += "\n\n";
        }

        opt.user += read_pipe_data();
    }

    llama_log_set(log_callback, &opt);
    LlamaData llama_data;
    if (llama_data.init(opt)) {
        return 1;
    }

    if (chat_loop(llama_data, opt)) {
        return 1;
    }

    return 0;
}