// run.cpp

#include "chat.h"
#include "common.h"
#include "llama-cpp.h"
#include "log.h"

#include "linenoise.cpp/linenoise.h"

#define JSON_ASSERT GGML_ASSERT
#include <nlohmann/json.hpp>

#if defined(_WIN32)
# define WIN32_LEAN_AND_MEAN
# ifndef NOMINMAX
#  define NOMINMAX
# endif
# include <windows.h>
# include <io.h>
#else
# include <sys/file.h>
# include <sys/ioctl.h>
# include <unistd.h>
#endif

#if defined(LLAMA_USE_CURL)
# include <curl/curl.h>
#else
# include "http.h"
#endif

#include <signal.h>

#include <chrono>
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <filesystem>
#include <iomanip>
#include <iostream>
#include <limits>
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
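
// SIGINT (Ctrl+C) handler: reset the terminal color and exit immediately.
// Installed from ctrl_c_handling() below on both POSIX and Windows.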
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32)
[[noreturn]] static void sigint_handler(int) {
    printf("\n" LOG_COL_DEFAULT);
    exit(0);  // not ideal, but it's the only way to guarantee exit in all cases
}
#endif

GGML_ATTRIBUTE_FORMAT(1, 2)
static int printe(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    const int ret = vfprintf(stderr, fmt, args);
    va_end(args);
    return ret;
}

static std::string strftime_fmt(const char * fmt, const std::tm & tm) {
    std::ostringstream oss;
    oss << std::put_time(&tm, fmt);
    return oss.str();
}
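
// Command-line options for llama-run. The positional arguments are the model
// reference and an optional prompt; "--" ends option parsing. init() returns
// 0 on success, 1 on a parse error and 2 when help was requested.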
class Opt {
  public:
    int init(int argc, const char ** argv) {
        ctx_params = llama_context_default_params();
        model_params = llama_model_default_params();
        context_size_default = ctx_params.n_batch;
        n_threads_default = ctx_params.n_threads;
        ngl_default = model_params.n_gpu_layers;
        common_params_sampling sampling;
        temperature_default = sampling.temp;

        if (argc < 2) {
            printe("Error: No arguments provided.\n");
            print_help();
            return 1;
        }

        // Parse arguments
        if (parse(argc, argv)) {
            printe("Error: Failed to parse arguments.\n");
            print_help();
            return 1;
        }

        // If help is requested, show help and exit
        if (help) {
            print_help();
            return 2;
        }

        ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default;
        ctx_params.n_ctx = ctx_params.n_batch;
        ctx_params.n_threads = ctx_params.n_threads_batch = n_threads >= 0 ? n_threads : n_threads_default;
        model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
        temperature = temperature >= 0 ? temperature : temperature_default;

        return 0;  // Success
    }

    llama_context_params ctx_params;
    llama_model_params model_params;
    std::string model_;
    std::string chat_template_file;
    std::string user;
    bool use_jinja = false;
    int context_size = -1, ngl = -1, n_threads = -1;
    float temperature = -1;
    bool verbose = false;

  private:
    int context_size_default = -1, ngl_default = -1, n_threads_default = -1;
    float temperature_default = -1;
    bool help = false;

    bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
        return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atoi(argv[++i]);
        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atof(argv[++i]);
        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, std::string & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = argv[++i];
        return 0;
    }

    int parse_options_with_value(int argc, const char ** argv, int & i, bool & options_parsing) {
        if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) {
            if (handle_option_with_value(argc, argv, i, context_size) == 1) {
                return 1;
            }
        } else if (options_parsing &&
                   (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "-ngl") == 0 || strcmp(argv[i], "--ngl") == 0)) {
            if (handle_option_with_value(argc, argv, i, ngl) == 1) {
                return 1;
            }
        } else if (options_parsing && (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--threads") == 0)) {
            if (handle_option_with_value(argc, argv, i, n_threads) == 1) {
                return 1;
            }
        } else if (options_parsing && strcmp(argv[i], "--temp") == 0) {
            if (handle_option_with_value(argc, argv, i, temperature) == 1) {
                return 1;
            }
        } else if (options_parsing && strcmp(argv[i], "--chat-template-file") == 0) {
            if (handle_option_with_value(argc, argv, i, chat_template_file) == 1) {
                return 1;
            }
            use_jinja = true;
        } else {
            return 2;
        }

        return 0;
    }

    int parse_options(const char ** argv, int & i, bool & options_parsing) {
        if (options_parsing && (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
            verbose = true;
        } else if (options_parsing && strcmp(argv[i], "--jinja") == 0) {
            use_jinja = true;
        } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) {
            help = true;
            return 0;
        } else if (options_parsing && strcmp(argv[i], "--") == 0) {
            options_parsing = false;
        } else {
            return 2;
        }

        return 0;
    }

    int parse_positional_args(const char ** argv, int & i, int & positional_args_i) {
        if (positional_args_i == 0) {
            if (!argv[i][0] || argv[i][0] == '-') {
                return 1;
            }

            ++positional_args_i;
            model_ = argv[i];
        } else if (positional_args_i == 1) {
            ++positional_args_i;
            user = argv[i];
        } else {
            user += " " + std::string(argv[i]);
        }

        return 0;
    }

    int parse(int argc, const char ** argv) {
        bool options_parsing = true;
        for (int i = 1, positional_args_i = 0; i < argc; ++i) {
            int ret = parse_options_with_value(argc, argv, i, options_parsing);
            if (ret == 0) {
                continue;
            } else if (ret == 1) {
                return ret;
            }

            ret = parse_options(argv, i, options_parsing);
            if (ret == 0) {
                continue;
            } else if (ret == 1) {
                return ret;
            }

            if (parse_positional_args(argv, i, positional_args_i)) {
                return 1;
            }
        }

        if (model_.empty()) {
            return 1;
        }

        return 0;
    }

    void print_help() const {
        printf(
            "Description:\n"
            "  Runs an LLM\n"
            "\n"
            "Usage:\n"
            "  llama-run [options] model [prompt]\n"
            "\n"
            "Options:\n"
            "  -c, --context-size <value>\n"
            "      Context size (default: %d)\n"
            "  --chat-template-file <path>\n"
            "      Path to the file containing the chat template to use with the model.\n"
            "      Only supports jinja templates and implicitly sets the --jinja flag.\n"
            "  --jinja\n"
            "      Use jinja templating for the chat template of the model\n"
            "  -n, -ngl, --ngl <value>\n"
            "      Number of GPU layers (default: %d)\n"
            "  --temp <value>\n"
            "      Temperature (default: %.1f)\n"
            "  -t, --threads <value>\n"
            "      Number of threads to use during generation (default: %d)\n"
            "  -v, --verbose, --log-verbose\n"
            "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
            "  -h, --help\n"
            "      Show help message\n"
            "\n"
            "Commands:\n"
            "  model\n"
            "      Model is a string with an optional prefix of\n"
            "      huggingface:// (hf://), modelscope:// (ms://), ollama://, https:// or file://.\n"
            "      If no protocol is specified and a file exists in the specified\n"
            "      path, file:// is assumed, otherwise if a file does not exist in\n"
            "      the specified path, ollama:// is assumed. Models that are being\n"
            "      pulled are downloaded with .partial extension while being\n"
            "      downloaded and then renamed as the file without the .partial\n"
            "      extension when complete.\n"
            "\n"
            "Examples:\n"
            "  llama-run llama3\n"
            "  llama-run ollama://granite-code\n"
            "  llama-run ollama://smollm:135m\n"
            "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run ms://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "modelscope://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run https://example.com/some-file1.gguf\n"
            "  llama-run some-file2.gguf\n"
            "  llama-run file://some-file3.gguf\n"
            "  llama-run --ngl 999 some-file4.gguf\n"
            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
            context_size_default, ngl_default, temperature_default, n_threads_default);
    }
};
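
// State shared with the download progress callback: bytes already on disk
// (for resumed downloads), the transfer start time, and whether a progress
// line was printed (so a trailing newline can be emitted afterwards).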
struct progress_data {
    size_t file_size = 0;
    std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
    bool printed = false;
};

static int get_terminal_width() {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO csbi;
    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
    struct winsize w;
    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
    return w.ws_col;
#endif
}
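
// RAII wrapper around FILE *. open() goes through ggml_fopen, lock() takes an
// exclusive advisory lock (LockFileEx on Windows, flock elsewhere) so that two
// llama-run processes cannot write the same .partial download, and the
// destructor releases the lock and closes the file.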
class File {
  public:
    FILE * file = nullptr;

    FILE * open(const std::string & filename, const char * mode) {
        file = ggml_fopen(filename.c_str(), mode);
        return file;
    }

    int lock() {
        if (file) {
# ifdef _WIN32
            fd = _fileno(file);
            hFile = (HANDLE) _get_osfhandle(fd);
            if (hFile == INVALID_HANDLE_VALUE) {
                fd = -1;
                return 1;
            }

            OVERLAPPED overlapped = {};
            if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD,
                            &overlapped)) {
                fd = -1;
                return 1;
            }
# else
            fd = fileno(file);
            if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
                fd = -1;
                return 1;
            }
# endif
        }

        return 0;
    }

    std::string to_string() {
        fseek(file, 0, SEEK_END);
        const size_t size = ftell(file);
        fseek(file, 0, SEEK_SET);
        std::string out;
        out.resize(size);
        const size_t read_size = fread(&out[0], 1, size, file);
        if (read_size != size) {
            printe("Error reading file: %s\n", strerror(errno));
        }

        return out;
    }

    ~File() {
        if (fd >= 0) {
# ifdef _WIN32
            if (hFile != INVALID_HANDLE_VALUE) {
                OVERLAPPED overlapped = {};
                UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped);
            }
# else
            flock(fd, LOCK_UN);
# endif
        }

        if (file) {
            fclose(file);
        }
    }

  private:
    int fd = -1;
# ifdef _WIN32
    HANDLE hFile = nullptr;
# endif
};
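
// Minimal HTTP downloader with two interchangeable backends: libcurl when
// LLAMA_USE_CURL is defined, otherwise the httplib-based client from http.h.
// Downloads are written to "<output_file>.partial" and renamed on completion;
// existing partial files are resumed via HTTP range requests.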
class HttpClient {
  public:
    int init(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
             const bool progress, std::string * response_str = nullptr) {
        if (std::filesystem::exists(output_file)) {
            return 0;
        }

        std::string output_file_partial;
        if (!output_file.empty()) {
            output_file_partial = output_file + ".partial";
        }

        if (download(url, headers, output_file_partial, progress, response_str)) {
            return 1;
        }

        if (!output_file.empty()) {
            try {
                std::filesystem::rename(output_file_partial, output_file);
            } catch (const std::filesystem::filesystem_error & e) {
                printe("Failed to rename '%s' to '%s': %s\n", output_file_partial.c_str(), output_file.c_str(),
                       e.what());
                return 1;
            }
        }

        return 0;
    }

#ifdef LLAMA_USE_CURL
    ~HttpClient() {
        if (chunk) {
            curl_slist_free_all(chunk);
        }

        if (curl) {
            curl_easy_cleanup(curl);
        }
    }

  private:
    CURL * curl = nullptr;
    struct curl_slist * chunk = nullptr;

    int download(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
                 const bool progress, std::string * response_str = nullptr) {
        curl = curl_easy_init();
        if (!curl) {
            return 1;
        }

        progress_data data;
        File out;
        if (!output_file.empty()) {
            if (!out.open(output_file, "ab")) {
                printe("Failed to open file for writing\n");
                return 1;
            }

            if (out.lock()) {
                printe("Failed to exclusively lock file\n");
                return 1;
            }
        }

        set_write_options(response_str, out);
        data.file_size = set_resume_point(output_file);
        set_progress_options(progress, data);
        set_headers(headers);
        CURLcode res = perform(url);
        if (res != CURLE_OK) {
            printe("Fetching resource '%s' failed: %s\n", url.c_str(), curl_easy_strerror(res));
            return 1;
        }

        return 0;
    }

    void set_write_options(std::string * response_str, const File & out) {
        if (response_str) {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, capture_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str);
        } else {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file);
        }
    }

    size_t set_resume_point(const std::string & output_file) {
        size_t file_size = 0;
        if (std::filesystem::exists(output_file)) {
            file_size = std::filesystem::file_size(output_file);
            curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast<curl_off_t>(file_size));
        }

        return file_size;
    }

    void set_progress_options(bool progress, progress_data & data) {
        if (progress) {
            curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
            curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data);
            curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress);
        }
    }

    void set_headers(const std::vector<std::string> & headers) {
        if (!headers.empty()) {
            if (chunk) {
                curl_slist_free_all(chunk);
                chunk = nullptr;
            }

            for (const auto & header : headers) {
                chunk = curl_slist_append(chunk, header.c_str());
            }

            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
        }
    }

    CURLcode perform(const std::string & url) {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
#ifdef _WIN32
        curl_easy_setopt(curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif
        return curl_easy_perform(curl);
    }
#else   // LLAMA_USE_CURL is not defined
#define curl_off_t long long  // temporary hack so the shared progress helpers below keep their signatures
  private:
    // this is a direct translation of the cURL download() above
    int download(const std::string & url, const std::vector<std::string> & headers_vec, const std::string & output_file,
                 const bool progress, std::string * response_str = nullptr) {
        try {
            auto [cli, url_parts] = common_http_client(url);

            httplib::Headers headers;
            for (const auto & h : headers_vec) {
                size_t pos = h.find(':');
                if (pos != std::string::npos) {
                    headers.emplace(h.substr(0, pos), h.substr(pos + 2));  // assumes "Name: value" formatting
                }
            }

            File out;
            if (!output_file.empty()) {
                if (!out.open(output_file, "ab")) {
                    printe("Failed to open file for writing\n");
                    return 1;
                }

                if (out.lock()) {
                    printe("Failed to exclusively lock file\n");
                    return 1;
                }
            }

            size_t resume_offset = 0;
            if (!output_file.empty() && std::filesystem::exists(output_file)) {
                resume_offset = std::filesystem::file_size(output_file);
                if (resume_offset > 0) {
                    headers.emplace("Range", "bytes=" + std::to_string(resume_offset) + "-");
                }
            }

            progress_data data;
            data.file_size = resume_offset;

            long long total_size = 0;
            long long received_this_session = 0;

            auto response_handler = [&](const httplib::Response & response) {
                if (resume_offset > 0 && response.status != 206) {
                    printe("\nServer does not support resuming. Restarting download.\n");
                    out.file = freopen(output_file.c_str(), "wb", out.file);
                    if (!out.file) {
                        return false;
                    }
                    data.file_size = 0;
                }
                if (progress) {
                    if (response.has_header("Content-Length")) {
                        total_size = std::stoll(response.get_header_value("Content-Length"));
                    } else if (response.has_header("Content-Range")) {
                        auto range = response.get_header_value("Content-Range");
                        auto slash = range.find('/');
                        if (slash != std::string::npos) {
                            total_size = std::stoll(range.substr(slash + 1));
                        }
                    }
                }
                return true;
            };

            auto content_receiver = [&](const char * chunk, size_t length) {
                if (out.file && fwrite(chunk, 1, length, out.file) != length) {
                    return false;
                }
                if (response_str) {
                    response_str->append(chunk, length);
                }
                received_this_session += length;
                if (progress && total_size > 0) {
                    update_progress(&data, total_size, received_this_session, 0, 0);
                }
                return true;
            };

            auto res = cli.Get(url_parts.path, headers, response_handler, content_receiver);
            if (data.printed) {
                printe("\n");
            }

            if (!res) {
                auto err = res.error();
                printe("Fetching resource '%s' failed: %s\n", url.c_str(), httplib::to_string(err).c_str());
                return 1;
            }
            if (res->status >= 400) {
                printe("Fetching resource '%s' failed with status code: %d\n", url.c_str(), res->status);
                return 1;
            }
        } catch (const std::exception & e) {
            printe("HTTP request failed: %s\n", e.what());
            return 1;
        }

        return 0;
    }
#endif  // LLAMA_USE_CURL

    static std::string human_readable_time(double seconds) {
        int hrs = static_cast<int>(seconds) / 3600;
        int mins = (static_cast<int>(seconds) % 3600) / 60;
        int secs = static_cast<int>(seconds) % 60;

        if (hrs > 0) {
            return string_format("%dh %02dm %02ds", hrs, mins, secs);
        } else if (mins > 0) {
            return string_format("%dm %02ds", mins, secs);
        } else {
            return string_format("%ds", secs);
        }
    }

    static std::string human_readable_size(curl_off_t size) {
        static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
        const int length = sizeof(suffix) / sizeof(suffix[0]);
        int i = 0;
        double dbl_size = size;
        if (size > 1024) {
            for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                dbl_size = size / 1024.0;
            }
        }

        return string_format("%.2f %s", dbl_size, suffix[i]);
    }

    static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t,
                               curl_off_t) {
        progress_data * data = static_cast<progress_data *>(ptr);
        if (total_to_download <= 0) {
            return 0;
        }

        total_to_download += data->file_size;
        const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
        std::string progress_prefix = generate_progress_prefix(percentage);

        const double speed = calculate_speed(now_downloaded, data->start_time);
        const double estimated_time = (total_to_download - now_downloaded) / speed;
        std::string progress_suffix =
            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, estimated_time);

        int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
        std::string progress_bar;
        generate_progress_bar(progress_bar_width, percentage, progress_bar);

        print_progress(progress_prefix, progress_bar, progress_suffix);
        data->printed = true;

        return 0;
    }

    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
        return (now_downloaded_plus_file_size * 100) / total_to_download;
    }

    static std::string generate_progress_prefix(curl_off_t percentage) {
        return string_format("%3ld%% |", static_cast<long int>(percentage));
    }

    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
        const auto now = std::chrono::steady_clock::now();
        const std::chrono::duration<double> elapsed_seconds = now - start_time;
        return now_downloaded / elapsed_seconds.count();
    }

    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
                                                double speed, double estimated_time) {
        const int width = 10;
        return string_format("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(),
                             width, human_readable_size(total_to_download).c_str(), width,
                             human_readable_size(speed).c_str(), width, human_readable_time(estimated_time).c_str());
    }

    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3;
        if (progress_bar_width < 1) {
            progress_bar_width = 1;
        }

        return progress_bar_width;
    }

    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
                                             std::string & progress_bar) {
        const curl_off_t pos = (percentage * progress_bar_width) / 100;
        for (int i = 0; i < progress_bar_width; ++i) {
            progress_bar.append((i < pos) ? "█" : " ");
        }

        return progress_bar;
    }

    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                               const std::string & progress_suffix) {
        printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
    }

    // Function to write data to a file
    static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        FILE * out = static_cast<FILE *>(stream);
        return fwrite(ptr, size, nmemb, out);
    }

    // Function to capture data into a string
    static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        std::string * str = static_cast<std::string *>(stream);
        str->append(static_cast<char *>(ptr), size * nmemb);
        return size * nmemb;
    }
};
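
// Everything needed for one chat session: the model, sampler chain and
// context, plus the running message list and the formatted-prompt buffer.
// init() resolves (and, if needed, downloads) the requested model first.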
class LlamaData {
  public:
    llama_model_ptr model;
    llama_sampler_ptr sampler;
    llama_context_ptr context;
    std::vector<llama_chat_message> messages;  // TODO: switch to common_chat_msg
    std::list<std::string> msg_strs;
    std::vector<char> fmtted;

    int init(Opt & opt) {
        model = initialize_model(opt);
        if (!model) {
            return 1;
        }

        context = initialize_context(model, opt);
        if (!context) {
            return 1;
        }

        sampler = initialize_sampler(opt);
        return 0;
    }

  private:
    int download(const std::string & url, const std::string & output_file, const bool progress,
                 const std::vector<std::string> & headers = {}, std::string * response_str = nullptr) {
        HttpClient http;
        if (http.init(url, headers, output_file, progress, response_str)) {
            return 1;
        }

        return 0;
    }
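
    // Model resolution: the helpers below map the scheme of the model
    // argument (hf://, ms://, ollama://, github:, s3://, https://, file://)
    // to a download from the matching endpoint. resolve_model() dispatches on
    // the prefix and leaves the local file name in the model string.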

    // Helper function to handle model tag extraction and URL construction
    std::pair<std::string, std::string> extract_model_and_tag(std::string & model, const std::string & base_url) {
        std::string model_tag = "latest";
        const size_t colon_pos = model.find(':');
        if (colon_pos != std::string::npos) {
            model_tag = model.substr(colon_pos + 1);
            model = model.substr(0, colon_pos);
        }

        std::string url = base_url + model + "/manifests/" + model_tag;
        return { model, url };
    }

    // Helper function to download and parse the manifest
    int download_and_parse_manifest(const std::string & url, const std::vector<std::string> & headers,
                                    nlohmann::json & manifest) {
        std::string manifest_str;
        int ret = download(url, "", false, headers, &manifest_str);
        if (ret) {
            return ret;
        }

        manifest = nlohmann::json::parse(manifest_str);
        return 0;
    }

    int dl_from_endpoint(std::string & model_endpoint, std::string & model, const std::string & bn) {
        // Find the second occurrence of '/' after protocol string
        size_t pos = model.find('/');
        pos = model.find('/', pos + 1);
        std::string hfr, hff;
        std::vector<std::string> headers = { "User-Agent: llama-cpp", "Accept: application/json" };
        std::string url;

        if (pos == std::string::npos) {
            auto [model_name, manifest_url] = extract_model_and_tag(model, model_endpoint + "v2/");
            hfr = model_name;

            nlohmann::json manifest;
            int ret = download_and_parse_manifest(manifest_url, headers, manifest);
            if (ret) {
                return ret;
            }

            hff = manifest["ggufFile"]["rfilename"];
        } else {
            hfr = model.substr(0, pos);
            hff = model.substr(pos + 1);
        }

        url = model_endpoint + hfr + "/resolve/main/" + hff;
        return download(url, bn, true, headers);
    }

    int modelscope_dl(std::string & model, const std::string & bn) {
        std::string model_endpoint = "https://modelscope.cn/models/";
        return dl_from_endpoint(model_endpoint, model, bn);
    }

    int huggingface_dl(std::string & model, const std::string & bn) {
        std::string model_endpoint = get_model_endpoint();
        return dl_from_endpoint(model_endpoint, model, bn);
    }

    int ollama_dl(std::string & model, const std::string & bn) {
        const std::vector<std::string> headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" };
        if (model.find('/') == std::string::npos) {
            model = "library/" + model;
        }

        auto [model_name, manifest_url] = extract_model_and_tag(model, "https://registry.ollama.ai/v2/");
        nlohmann::json manifest;
        int ret = download_and_parse_manifest(manifest_url, {}, manifest);
        if (ret) {
            return ret;
        }

        std::string layer;
        for (const auto & l : manifest["layers"]) {
            if (l["mediaType"] == "application/vnd.ollama.image.model") {
                layer = l["digest"];
                break;
            }
        }

        std::string blob_url = "https://registry.ollama.ai/v2/" + model_name + "/blobs/" + layer;
        return download(blob_url, bn, true, headers);
    }

    int github_dl(const std::string & model, const std::string & bn) {
        std::string repository = model;
        std::string branch = "main";
        const size_t at_pos = model.find('@');
        if (at_pos != std::string::npos) {
            repository = model.substr(0, at_pos);
            branch = model.substr(at_pos + 1);
        }

        const std::vector<std::string> repo_parts = string_split(repository, "/");
        if (repo_parts.size() < 3) {
            printe("Invalid GitHub repository format\n");
            return 1;
        }

        const std::string & org = repo_parts[0];
        const std::string & project = repo_parts[1];
        std::string url = "https://raw.githubusercontent.com/" + org + "/" + project + "/" + branch;
        for (size_t i = 2; i < repo_parts.size(); ++i) {
            url += "/" + repo_parts[i];
        }

        return download(url, bn, true);
    }

    int s3_dl(const std::string & model, const std::string & bn) {
        const size_t slash_pos = model.find('/');
        if (slash_pos == std::string::npos) {
            return 1;
        }

        const std::string bucket = model.substr(0, slash_pos);
        const std::string key = model.substr(slash_pos + 1);
        const char * access_key = std::getenv("AWS_ACCESS_KEY_ID");
        const char * secret_key = std::getenv("AWS_SECRET_ACCESS_KEY");
        if (!access_key || !secret_key) {
            printe("AWS credentials not found in environment\n");
            return 1;
        }

        // Generate AWS Signature Version 4 headers
        // (Implementation requires HMAC-SHA256 and date handling)
        // Get current timestamp
        const time_t now = time(nullptr);
        const tm tm = *gmtime(&now);
        const std::string date = strftime_fmt("%Y%m%d", tm);
        const std::string datetime = strftime_fmt("%Y%m%dT%H%M%SZ", tm);
        const std::vector<std::string> headers = {
            "Authorization: AWS4-HMAC-SHA256 Credential=" + std::string(access_key) + "/" + date +
                "/us-east-1/s3/aws4_request",
            "x-amz-content-sha256: UNSIGNED-PAYLOAD",
            "x-amz-date: " + datetime
        };

        const std::string url = "https://" + bucket + ".s3.amazonaws.com/" + key;
        return download(url, bn, true, headers);
    }

    std::string basename(const std::string & path) {
        const size_t pos = path.find_last_of("/\\");
        if (pos == std::string::npos) {
            return path;
        }

        return path.substr(pos + 1);
    }

    int rm_until_substring(std::string & model_, const std::string & substring) {
        const std::string::size_type pos = model_.find(substring);
        if (pos == std::string::npos) {
            return 1;
        }

        model_ = model_.substr(pos + substring.size());  // Skip past the substring
        return 0;
    }

    int resolve_model(std::string & model_) {
        int ret = 0;
        if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) {
            rm_until_substring(model_, "://");
            return ret;
        }

        const std::string bn = basename(model_);
        if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://") ||
            string_starts_with(model_, "hf.co/")) {
            rm_until_substring(model_, "hf.co/");
            rm_until_substring(model_, "://");
            ret = huggingface_dl(model_, bn);
        } else if (string_starts_with(model_, "ms://") || string_starts_with(model_, "modelscope://")) {
            rm_until_substring(model_, "://");
            ret = modelscope_dl(model_, bn);
        } else if ((string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) &&
                   !string_starts_with(model_, "https://ollama.com/library/")) {
            ret = download(model_, bn, true);
        } else if (string_starts_with(model_, "github:") || string_starts_with(model_, "github://")) {
            rm_until_substring(model_, "github:");
            rm_until_substring(model_, "://");
            ret = github_dl(model_, bn);
        } else if (string_starts_with(model_, "s3://")) {
            rm_until_substring(model_, "://");
            ret = s3_dl(model_, bn);
        } else {  // ollama:// or nothing
            rm_until_substring(model_, "ollama.com/library/");
            rm_until_substring(model_, "://");
            ret = ollama_dl(model_, bn);
        }

        model_ = bn;
        return ret;
    }

    // Initializes the model and returns a unique pointer to it
    llama_model_ptr initialize_model(Opt & opt) {
        ggml_backend_load_all();
        resolve_model(opt.model_);
        printe("\r" LOG_CLR_TO_EOL "Loading model");
        llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
        if (!model) {
            printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
        }

        printe("\r" LOG_CLR_TO_EOL);
        return model;
    }

    // Initializes the context with the specified parameters
    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
        llama_context_ptr context(llama_init_from_model(model.get(), opt.ctx_params));
        if (!context) {
            printe("%s: error: failed to create the llama_context\n", __func__);
        }

        return context;
    }

    // Initializes and configures the sampler
    llama_sampler_ptr initialize_sampler(const Opt & opt) {
        llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));
        return sampler;
    }
};

// Add a message to `messages` and store its content in `msg_strs`
// (text is taken by value so the move below actually moves)
static void add_message(const char * role, std::string text, LlamaData & llama_data) {
    llama_data.msg_strs.push_back(std::move(text));
    llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() });
}

// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(const struct common_chat_templates * tmpls, LlamaData & llama_data, const bool append,
                               bool use_jinja) {
    common_chat_templates_inputs inputs;
    for (const auto & msg : llama_data.messages) {
        common_chat_msg cmsg;
        cmsg.role = msg.role;
        cmsg.content = msg.content;
        inputs.messages.push_back(cmsg);
    }
    inputs.add_generation_prompt = append;
    inputs.use_jinja = use_jinja;

    auto chat_params = common_chat_templates_apply(tmpls, inputs);
    // TODO: use other params for tool calls.
    auto result = chat_params.prompt;
    llama_data.fmtted.resize(result.size() + 1);
    memcpy(llama_data.fmtted.data(), result.c_str(), result.size() + 1);
    return result.size();
}

// Function to tokenize the prompt
static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                           std::vector<llama_token> & prompt_tokens, const LlamaData & llama_data) {
    const bool is_first = llama_memory_seq_pos_max(llama_get_memory(llama_data.context.get()), 0) == -1;

    int n_tokens = prompt.size() + 2 * is_first;
    prompt_tokens.resize(n_tokens);
    n_tokens = llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                              prompt_tokens.data(), prompt_tokens.size(),
                              is_first, /*parse_special =*/true);
    if (n_tokens == std::numeric_limits<int32_t>::min()) {
        printe("tokenization failed: input too large\n");
        return -1;
    }

    if (n_tokens < 0) {
        // llama_tokenize returns the negated required token count when the
        // buffer is too small; resize and tokenize again
        prompt_tokens.resize(-n_tokens);
        int check = llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                                   prompt_tokens.data(), prompt_tokens.size(),
                                   is_first, /*parse_special =*/true);
        if (check != -n_tokens) {
            printe("failed to tokenize the prompt (size mismatch)\n");
            return -1;
        }

        n_tokens = check;
    } else {
        prompt_tokens.resize(n_tokens);
    }

    return n_tokens;
}

// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
    const int n_ctx = llama_n_ctx(ctx.get());
    const int n_ctx_used = llama_memory_seq_pos_max(llama_get_memory(ctx.get()), 0);
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        printf(LOG_COL_DEFAULT "\n");
        printe("context size exceeded\n");
        return 1;
    }

    return 0;
}

// convert the token to a string
static int convert_token_to_string(const llama_vocab * vocab, const llama_token token_id, std::string & piece) {
    char buf[256];
    int n = llama_token_to_piece(vocab, token_id, buf, sizeof(buf), 0, true);
    if (n < 0) {
        printe("failed to convert token to piece\n");
        return 1;
    }

    piece = std::string(buf, n);
    return 0;
}

static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) {
    printf("%s", piece.c_str());
    fflush(stdout);
    response += piece;
}

// helper function to evaluate a prompt and generate a response
static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
    const llama_vocab * vocab = llama_model_get_vocab(llama_data.model.get());

    std::vector<llama_token> tokens;
    if (tokenize_prompt(vocab, prompt, tokens, llama_data) < 0) {
        return 1;
    }

    // prepare a batch for the prompt
    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
    llama_token new_token_id;
    while (true) {
        if (check_context_size(llama_data.context, batch)) {
            return 1;
        }

        if (llama_decode(llama_data.context.get(), batch)) {
            printe("failed to decode\n");
            return 1;
        }

        // sample the next token and check whether it ends the generation
        new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1);
        if (llama_vocab_is_eog(vocab, new_token_id)) {
            break;
        }

        std::string piece;
        if (convert_token_to_string(vocab, new_token_id, piece)) {
            return 1;
        }

        print_word_and_concatenate_to_response(piece, response);

        // prepare the next batch with the sampled token
        batch = llama_batch_get_one(&new_token_id, 1);
    }

    printf(LOG_COL_DEFAULT);
    return 0;
}
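
// Read one line of user input: linenoise (with history) on POSIX systems,
// plain std::getline on Windows. The prompt prefix can be overridden via the
// LLAMA_PROMPT_PREFIX environment variable. Returns 0 when input was read,
// 1 to end the loop ("/bye" or EOF) and 2 when the line was empty.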
static int read_user_input(std::string & user_input) {
    static const char * prompt_prefix_env = std::getenv("LLAMA_PROMPT_PREFIX");
    static const char * prompt_prefix = prompt_prefix_env ? prompt_prefix_env : "> ";
#ifdef WIN32
    printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);

    std::getline(std::cin, user_input);
    if (std::cin.eof()) {
        printf("\n");
        return 1;
    }
#else
    std::unique_ptr<char, decltype(&std::free)> line(const_cast<char *>(linenoise(prompt_prefix)), free);
    if (!line) {
        return 1;
    }

    user_input = line.get();
#endif

    if (user_input == "/bye") {
        return 1;
    }

    if (user_input.empty()) {
        return 2;
    }

#ifndef WIN32
    linenoiseHistoryAdd(line.get());
#endif

    return 0;  // Should have data in happy path
}

// Function to generate a response based on the prompt
static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response,
                             const bool stdout_a_terminal) {
    // Set response color
    if (stdout_a_terminal) {
        printf(LOG_COL_YELLOW);
    }

    if (generate(llama_data, prompt, response)) {
        printe("failed to generate response\n");
        return 1;
    }

    // End response with color reset and newline
    printf("\n%s", stdout_a_terminal ? LOG_COL_DEFAULT : "");
    return 0;
}

// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(const common_chat_templates * tmpls, LlamaData & llama_data,
                                                   const bool append, int & output_length, bool use_jinja) {
    const int new_len = apply_chat_template(tmpls, llama_data, append, use_jinja);
    if (new_len < 0) {
        printe("failed to apply the chat template\n");
        return -1;
    }

    output_length = new_len;
    return 0;
}

// Helper function to handle user input
static int handle_user_input(std::string & user_input, const std::string & user) {
    if (!user.empty()) {
        user_input = user;
        return 0;  // No need for interactive input
    }

    return read_user_input(user_input);  // returns nonzero when the input should end or retry the loop
}

static bool is_stdin_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdin, &mode);
#else
    return isatty(STDIN_FILENO);
#endif
}

static bool is_stdout_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdout, &mode);
#else
    return isatty(STDOUT_FILENO);
#endif
}

// Function to handle user input
static int get_user_input(std::string & user_input, const std::string & user) {
    while (true) {
        const int ret = handle_user_input(user_input, user);
        if (ret == 1) {
            return 1;
        }

        if (ret == 2) {
            continue;
        }

        break;
    }

    return 0;
}

// Reads a chat template file to be used
static std::string read_chat_template_file(const std::string & chat_template_file) {
    File file;
    if (!file.open(chat_template_file, "r")) {
        printe("Error opening chat template file '%s': %s\n", chat_template_file.c_str(), strerror(errno));
        return "";
    }

    return file.to_string();
}

// Process one user message: returns 0 to continue the chat loop, 1 on error
// and 2 when a one-shot prompt (given on the command line) has been answered
static int process_user_message(const Opt & opt, const std::string & user_input, LlamaData & llama_data,
                                const common_chat_templates_ptr & chat_templates, int & prev_len,
                                const bool stdout_a_terminal) {
    add_message("user", opt.user.empty() ? user_input : opt.user, llama_data);
    int new_len;
    if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, true, new_len, opt.use_jinja) < 0) {
        return 1;
    }

    std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len);
    std::string response;
    if (generate_response(llama_data, prompt, response, stdout_a_terminal)) {
        return 1;
    }

    if (!opt.user.empty()) {
        return 2;
    }

    add_message("assistant", response, llama_data);
    if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, false, prev_len, opt.use_jinja) < 0) {
        return 1;
    }

    return 0;
}

// Main chat loop function
static int chat_loop(LlamaData & llama_data, const Opt & opt) {
    int prev_len = 0;
    llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
    std::string chat_template;
    if (!opt.chat_template_file.empty()) {
        chat_template = read_chat_template_file(opt.chat_template_file);
    }

    common_chat_templates_ptr chat_templates = common_chat_templates_init(llama_data.model.get(), chat_template);
    static const bool stdout_a_terminal = is_stdout_a_terminal();
    while (true) {
        // Get user input
        std::string user_input;
        if (get_user_input(user_input, opt.user) == 1) {
            return 0;
        }

        const int ret = process_user_message(opt, user_input, llama_data, chat_templates, prev_len, stdout_a_terminal);
        if (ret == 1) {
            return 1;
        } else if (ret == 2) {
            break;
        }
    }

    return 0;
}
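
// ggml log callback: always forward errors, everything else only in verbose
// mode (-v / --verbose / --log-verbose).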
static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
    const Opt * opt = static_cast<Opt *>(p);
    if (opt->verbose || level == GGML_LOG_LEVEL_ERROR) {
        printe("%s", text);
    }
}

static std::string read_pipe_data() {
    std::ostringstream result;
    result << std::cin.rdbuf();  // Read all data from std::cin
    return result.str();
}

static void ctrl_c_handling() {
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = sigint_handler;
    sigemptyset(&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined(_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
}
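
// Entry point: parse options, append piped-in stdin to the prompt if any,
// initialize the model, context and sampler, then run the chat loop.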
int main(int argc, const char ** argv) {
    ctrl_c_handling();
    Opt opt;
    const int ret = opt.init(argc, argv);
    if (ret == 2) {
        return 0;
    } else if (ret) {
        return 1;
    }

    if (!is_stdin_a_terminal()) {
        if (!opt.user.empty()) {
            opt.user += "\n\n";
        }

        opt.user += read_pipe_data();
    }

    llama_log_set(log_callback, &opt);
    LlamaData llama_data;
    if (llama_data.init(opt)) {
        return 1;
    }

    if (chat_loop(llama_data, opt)) {
        return 1;
    }

    return 0;
}