run.cpp

#if defined(_WIN32)
#    include <windows.h>
#    include <io.h>
#else
#    include <sys/file.h>
#    include <sys/ioctl.h>
#    include <unistd.h>
#endif

#if defined(LLAMA_USE_CURL)
#    include <curl/curl.h>
#endif

#include <signal.h>

#include <cerrno>
#include <chrono>
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <filesystem>
#include <iomanip>
#include <iostream>
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "chat.h"
#include "common.h"
#include "json.hpp"
#include "linenoise.cpp/linenoise.h"
#include "llama-cpp.h"
#include "log.h"

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32)
[[noreturn]] static void sigint_handler(int) {
    printf("\n" LOG_COL_DEFAULT);
    exit(0);  // not ideal, but it's the only way to guarantee exit in all cases
}
#endif

GGML_ATTRIBUTE_FORMAT(1, 2)
static int printe(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    const int ret = vfprintf(stderr, fmt, args);
    va_end(args);

    return ret;
}

static std::string strftime_fmt(const char * fmt, const std::tm & tm) {
    std::ostringstream oss;
    oss << std::put_time(&tm, fmt);

    return oss.str();
}
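
// Command-line options for llama-run: parses flags, option values and the
// positional model/prompt arguments, falling back to llama.cpp defaults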
class Opt {
  public:
    int init(int argc, const char ** argv) {
        ctx_params = llama_context_default_params();
        model_params = llama_model_default_params();
        context_size_default = ctx_params.n_batch;
        n_threads_default = ctx_params.n_threads;
        ngl_default = model_params.n_gpu_layers;
        common_params_sampling sampling;
        temperature_default = sampling.temp;

        if (argc < 2) {
            printe("Error: No arguments provided.\n");
            print_help();
            return 1;
        }

        // Parse arguments
        if (parse(argc, argv)) {
            printe("Error: Failed to parse arguments.\n");
            print_help();
            return 1;
        }

        // If help is requested, show help and exit
        if (help) {
            print_help();
            return 2;
        }

        ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default;
        ctx_params.n_ctx = ctx_params.n_batch;
        ctx_params.n_threads = ctx_params.n_threads_batch = n_threads >= 0 ? n_threads : n_threads_default;
        model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
        temperature = temperature >= 0 ? temperature : temperature_default;

        return 0;  // Success
    }

    llama_context_params ctx_params;
    llama_model_params model_params;
    std::string model_;
    std::string chat_template_file;
    std::string user;
    bool use_jinja = false;
    int context_size = -1, ngl = -1, n_threads = -1;
    float temperature = -1;
    bool verbose = false;

  private:
    int context_size_default = -1, ngl_default = -1, n_threads_default = -1;
    float temperature_default = -1;
    bool help = false;

    bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
        return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atoi(argv[++i]);

        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atof(argv[++i]);

        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, std::string & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = argv[++i];

        return 0;
    }

    int parse_options_with_value(int argc, const char ** argv, int & i, bool & options_parsing) {
        if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) {
            if (handle_option_with_value(argc, argv, i, context_size) == 1) {
                return 1;
            }
        } else if (options_parsing &&
                   (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "-ngl") == 0 || strcmp(argv[i], "--ngl") == 0)) {
            if (handle_option_with_value(argc, argv, i, ngl) == 1) {
                return 1;
            }
        } else if (options_parsing && (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--threads") == 0)) {
            if (handle_option_with_value(argc, argv, i, n_threads) == 1) {
                return 1;
            }
        } else if (options_parsing && strcmp(argv[i], "--temp") == 0) {
            if (handle_option_with_value(argc, argv, i, temperature) == 1) {
                return 1;
            }
        } else if (options_parsing && strcmp(argv[i], "--chat-template-file") == 0) {
            if (handle_option_with_value(argc, argv, i, chat_template_file) == 1) {
                return 1;
            }

            use_jinja = true;
        } else {
            return 2;
        }

        return 0;
    }

    int parse_options(const char ** argv, int & i, bool & options_parsing) {
        if (options_parsing && (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
            verbose = true;
        } else if (options_parsing && strcmp(argv[i], "--jinja") == 0) {
            use_jinja = true;
        } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) {
            help = true;
            return 0;
        } else if (options_parsing && strcmp(argv[i], "--") == 0) {
            options_parsing = false;
        } else {
            return 2;
        }

        return 0;
    }

    int parse_positional_args(const char ** argv, int & i, int & positional_args_i) {
        if (positional_args_i == 0) {
            if (!argv[i][0] || argv[i][0] == '-') {
                return 1;
            }

            ++positional_args_i;
            model_ = argv[i];
        } else if (positional_args_i == 1) {
            ++positional_args_i;
            user = argv[i];
        } else {
            user += " " + std::string(argv[i]);
        }

        return 0;
    }

    int parse(int argc, const char ** argv) {
        bool options_parsing = true;
        for (int i = 1, positional_args_i = 0; i < argc; ++i) {
            int ret = parse_options_with_value(argc, argv, i, options_parsing);
            if (ret == 0) {
                continue;
            } else if (ret == 1) {
                return ret;
            }

            ret = parse_options(argv, i, options_parsing);
            if (ret == 0) {
                continue;
            } else if (ret == 1) {
                return ret;
            }

            if (parse_positional_args(argv, i, positional_args_i)) {
                return 1;
            }
        }

        if (model_.empty()) {
            return 1;
        }

        return 0;
    }

    void print_help() const {
        printf(
            "Description:\n"
            "  Runs an LLM\n"
            "\n"
            "Usage:\n"
            "  llama-run [options] model [prompt]\n"
            "\n"
            "Options:\n"
            "  -c, --context-size <value>\n"
            "      Context size (default: %d)\n"
            "  --chat-template-file <path>\n"
            "      Path to the file containing the chat template to use with the model.\n"
            "      Only supports jinja templates and implicitly sets the --jinja flag.\n"
            "  --jinja\n"
            "      Use jinja templating for the chat template of the model\n"
            "  -n, -ngl, --ngl <value>\n"
            "      Number of GPU layers (default: %d)\n"
            "  --temp <value>\n"
            "      Temperature (default: %.1f)\n"
            "  -t, --threads <value>\n"
            "      Number of threads to use during generation (default: %d)\n"
            "  -v, --verbose, --log-verbose\n"
            "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
            "  -h, --help\n"
            "      Show help message\n"
            "\n"
            "Commands:\n"
            "  model\n"
            "      Model is a string with an optional prefix of \n"
            "      huggingface:// (hf://), ollama://, https:// or file://.\n"
            "      If no protocol is specified and a file exists in the specified\n"
            "      path, file:// is assumed, otherwise if a file does not exist in\n"
            "      the specified path, ollama:// is assumed. Models that are being\n"
            "      pulled are downloaded with .partial extension while being\n"
            "      downloaded and then renamed as the file without the .partial\n"
            "      extension when complete.\n"
            "\n"
            "Examples:\n"
            "  llama-run llama3\n"
            "  llama-run ollama://granite-code\n"
            "  llama-run ollama://smollm:135m\n"
            "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run https://example.com/some-file1.gguf\n"
            "  llama-run some-file2.gguf\n"
            "  llama-run file://some-file3.gguf\n"
            "  llama-run --ngl 999 some-file4.gguf\n"
            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
            context_size_default, ngl_default, temperature_default, n_threads_default);
    }
};

struct progress_data {
    size_t file_size = 0;
    std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
    bool printed = false;
};
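
// Best-effort query of the terminal width, used to size the download progress bar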
static int get_terminal_width() {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO csbi;
    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
    struct winsize w;
    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
    return w.ws_col;
#endif
}
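
// RAII wrapper around a FILE * that can take an exclusive advisory lock
// (LockFileEx on Windows, flock elsewhere); the lock and the handle are
// released in the destructor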
class File {
  public:
    FILE * file = nullptr;

    FILE * open(const std::string & filename, const char * mode) {
        file = ggml_fopen(filename.c_str(), mode);

        return file;
    }

    int lock() {
        if (file) {
#    ifdef _WIN32
            fd = _fileno(file);
            hFile = (HANDLE) _get_osfhandle(fd);
            if (hFile == INVALID_HANDLE_VALUE) {
                fd = -1;

                return 1;
            }

            OVERLAPPED overlapped = {};
            if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD,
                            &overlapped)) {
                fd = -1;

                return 1;
            }
#    else
            fd = fileno(file);
            if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
                fd = -1;

                return 1;
            }
#    endif
        }

        return 0;
    }

    std::string to_string() {
        fseek(file, 0, SEEK_END);
        const size_t size = ftell(file);
        fseek(file, 0, SEEK_SET);
        std::string out;
        out.resize(size);
        const size_t read_size = fread(&out[0], 1, size, file);
        if (read_size != size) {
            printe("Error reading file: %s", strerror(errno));
        }

        return out;
    }

    ~File() {
        if (fd >= 0) {
#    ifdef _WIN32
            if (hFile != INVALID_HANDLE_VALUE) {
                OVERLAPPED overlapped = {};
                UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped);
            }
#    else
            flock(fd, LOCK_UN);
#    endif
        }

        if (file) {
            fclose(file);
        }
    }

  private:
    int fd = -1;
#    ifdef _WIN32
    HANDLE hFile = nullptr;
#    endif
};

#ifdef LLAMA_USE_CURL
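// Minimal libcurl wrapper: downloads a URL either into a file (resuming a
// .partial download when one exists) or into a string, optionally drawing a
// progress bar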
class HttpClient {
  public:
    int init(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
             const bool progress, std::string * response_str = nullptr) {
        if (std::filesystem::exists(output_file)) {
            return 0;
        }

        std::string output_file_partial;
        curl = curl_easy_init();
        if (!curl) {
            return 1;
        }

        progress_data data;
        File out;
        if (!output_file.empty()) {
            output_file_partial = output_file + ".partial";
            if (!out.open(output_file_partial, "ab")) {
                printe("Failed to open file for writing\n");

                return 1;
            }

            if (out.lock()) {
                printe("Failed to exclusively lock file\n");

                return 1;
            }
        }

        set_write_options(response_str, out);
        data.file_size = set_resume_point(output_file_partial);
        set_progress_options(progress, data);
        set_headers(headers);
        CURLcode res = perform(url);
        if (res != CURLE_OK) {
            printe("Fetching resource '%s' failed: %s\n", url.c_str(), curl_easy_strerror(res));

            return 1;
        }

        if (!output_file.empty()) {
            std::filesystem::rename(output_file_partial, output_file);
        }

        return 0;
    }

    ~HttpClient() {
        if (chunk) {
            curl_slist_free_all(chunk);
        }

        if (curl) {
            curl_easy_cleanup(curl);
        }
    }

  private:
    CURL * curl = nullptr;
    struct curl_slist * chunk = nullptr;

    void set_write_options(std::string * response_str, const File & out) {
        if (response_str) {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, capture_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str);
        } else {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file);
        }
    }

    size_t set_resume_point(const std::string & output_file) {
        size_t file_size = 0;
        if (std::filesystem::exists(output_file)) {
            file_size = std::filesystem::file_size(output_file);
            curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast<curl_off_t>(file_size));
        }

        return file_size;
    }

    void set_progress_options(bool progress, progress_data & data) {
        if (progress) {
            curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
            curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data);
            curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress);
        }
    }

    void set_headers(const std::vector<std::string> & headers) {
        if (!headers.empty()) {
            if (chunk) {
                curl_slist_free_all(chunk);
                chunk = nullptr;
            }

            for (const auto & header : headers) {
                chunk = curl_slist_append(chunk, header.c_str());
            }

            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
        }
    }

    CURLcode perform(const std::string & url) {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
        return curl_easy_perform(curl);
    }

    static std::string human_readable_time(double seconds) {
        int hrs = static_cast<int>(seconds) / 3600;
        int mins = (static_cast<int>(seconds) % 3600) / 60;
        int secs = static_cast<int>(seconds) % 60;

        if (hrs > 0) {
            return string_format("%dh %02dm %02ds", hrs, mins, secs);
        } else if (mins > 0) {
            return string_format("%dm %02ds", mins, secs);
        } else {
            return string_format("%ds", secs);
        }
    }

    static std::string human_readable_size(curl_off_t size) {
        static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
        const int length = sizeof(suffix) / sizeof(suffix[0]);
        int i = 0;
        double dbl_size = size;
        if (size > 1024) {
            for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                dbl_size = size / 1024.0;
            }
        }

        return string_format("%.2f %s", dbl_size, suffix[i]);
    }
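
    // curl transfer callback: accounts for the already-downloaded .partial
    // prefix, then renders "<percent>% |<bar>| <downloaded>/<total> <speed>/s <eta>"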
    static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t,
                               curl_off_t) {
        progress_data * data = static_cast<progress_data *>(ptr);
        if (total_to_download <= 0) {
            return 0;
        }

        total_to_download += data->file_size;
        const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
        std::string progress_prefix = generate_progress_prefix(percentage);

        const double speed = calculate_speed(now_downloaded, data->start_time);
        const double tim = (total_to_download - now_downloaded) / speed;
        std::string progress_suffix =
            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim);

        int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
        std::string progress_bar;
        generate_progress_bar(progress_bar_width, percentage, progress_bar);

        print_progress(progress_prefix, progress_bar, progress_suffix);
        data->printed = true;

        return 0;
    }

    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
        return (now_downloaded_plus_file_size * 100) / total_to_download;
    }

    static std::string generate_progress_prefix(curl_off_t percentage) {
        return string_format("%3ld%% |", static_cast<long int>(percentage));
    }

    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
        const auto now = std::chrono::steady_clock::now();
        const std::chrono::duration<double> elapsed_seconds = now - start_time;
        return now_downloaded / elapsed_seconds.count();
    }

    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
                                                double speed, double estimated_time) {
        const int width = 10;
        return string_format("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(),
                             width, human_readable_size(total_to_download).c_str(), width,
                             human_readable_size(speed).c_str(), width, human_readable_time(estimated_time).c_str());
    }

    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3;
        if (progress_bar_width < 1) {
            progress_bar_width = 1;
        }

        return progress_bar_width;
    }

    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
                                             std::string & progress_bar) {
        const curl_off_t pos = (percentage * progress_bar_width) / 100;
        for (int i = 0; i < progress_bar_width; ++i) {
            progress_bar.append((i < pos) ? "█" : " ");
        }

        return progress_bar;
    }

    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                               const std::string & progress_suffix) {
        printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
    }

    // Function to write data to a file
    static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        FILE * out = static_cast<FILE *>(stream);
        return fwrite(ptr, size, nmemb, out);
    }

    // Function to capture data into a string
    static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        std::string * str = static_cast<std::string *>(stream);
        str->append(static_cast<char *>(ptr), size * nmemb);
        return size * nmemb;
    }
};
#endif
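
// Owns the model, sampler and context plus the running chat transcript;
// messages reference the strings stored in msg_strs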
class LlamaData {
  public:
    llama_model_ptr model;
    llama_sampler_ptr sampler;
    llama_context_ptr context;
    std::vector<llama_chat_message> messages;  // TODO: switch to common_chat_msg
    std::list<std::string> msg_strs;
    std::vector<char> fmtted;

    int init(Opt & opt) {
        model = initialize_model(opt);
        if (!model) {
            return 1;
        }

        context = initialize_context(model, opt);
        if (!context) {
            return 1;
        }

        sampler = initialize_sampler(opt);

        return 0;
    }

  private:
#ifdef LLAMA_USE_CURL
    int download(const std::string & url, const std::string & output_file, const bool progress,
                 const std::vector<std::string> & headers = {}, std::string * response_str = nullptr) {
        HttpClient http;
        if (http.init(url, headers, output_file, progress, response_str)) {
            return 1;
        }

        return 0;
    }
#else
    int download(const std::string &, const std::string &, const bool, const std::vector<std::string> & = {},
                 std::string * = nullptr) {
        printe("%s: llama.cpp built without libcurl, downloading from a URL is not supported\n", __func__);

        return 1;
    }
#endif

    // Helper function to handle model tag extraction and URL construction
    std::pair<std::string, std::string> extract_model_and_tag(std::string & model, const std::string & base_url) {
        std::string model_tag = "latest";
        const size_t colon_pos = model.find(':');
        if (colon_pos != std::string::npos) {
            model_tag = model.substr(colon_pos + 1);
            model = model.substr(0, colon_pos);
        }

        std::string url = base_url + model + "/manifests/" + model_tag;

        return { model, url };
    }

    // Helper function to download and parse the manifest
    int download_and_parse_manifest(const std::string & url, const std::vector<std::string> & headers,
                                    nlohmann::json & manifest) {
        std::string manifest_str;
        int ret = download(url, "", false, headers, &manifest_str);
        if (ret) {
            return ret;
        }

        manifest = nlohmann::json::parse(manifest_str);

        return 0;
    }

    int huggingface_dl(std::string & model, const std::string & bn) {
        // Find the second occurrence of '/' after the protocol string
        size_t pos = model.find('/');
        pos = model.find('/', pos + 1);
        std::string hfr, hff;
        std::vector<std::string> headers = { "User-Agent: llama-cpp", "Accept: application/json" };
        std::string url;
        std::string model_endpoint = get_model_endpoint();

        if (pos == std::string::npos) {
            auto [model_name, manifest_url] = extract_model_and_tag(model, model_endpoint + "v2/");
            hfr = model_name;

            nlohmann::json manifest;
            int ret = download_and_parse_manifest(manifest_url, headers, manifest);
            if (ret) {
                return ret;
            }

            hff = manifest["ggufFile"]["rfilename"];
        } else {
            hfr = model.substr(0, pos);
            hff = model.substr(pos + 1);
        }

        url = model_endpoint + hfr + "/resolve/main/" + hff;

        return download(url, bn, true, headers);
    }
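
    // Pull a model from the Ollama registry: fetch the manifest, find the
    // layer holding the model weights and download that blob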
    int ollama_dl(std::string & model, const std::string & bn) {
        const std::vector<std::string> headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" };
        if (model.find('/') == std::string::npos) {
            model = "library/" + model;
        }

        auto [model_name, manifest_url] = extract_model_and_tag(model, "https://registry.ollama.ai/v2/");
        nlohmann::json manifest;
        int ret = download_and_parse_manifest(manifest_url, {}, manifest);
        if (ret) {
            return ret;
        }

        std::string layer;
        for (const auto & l : manifest["layers"]) {
            if (l["mediaType"] == "application/vnd.ollama.image.model") {
                layer = l["digest"];
                break;
            }
        }

        std::string blob_url = "https://registry.ollama.ai/v2/" + model_name + "/blobs/" + layer;

        return download(blob_url, bn, true, headers);
    }
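
    // Download a raw file from GitHub; the model string is org/project/path,
    // optionally suffixed with @branch (defaults to main)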
    int github_dl(const std::string & model, const std::string & bn) {
        std::string repository = model;
        std::string branch = "main";
        const size_t at_pos = model.find('@');
        if (at_pos != std::string::npos) {
            repository = model.substr(0, at_pos);
            branch = model.substr(at_pos + 1);
        }

        const std::vector<std::string> repo_parts = string_split(repository, "/");
        if (repo_parts.size() < 3) {
            printe("Invalid GitHub repository format\n");
            return 1;
        }

        const std::string & org = repo_parts[0];
        const std::string & project = repo_parts[1];
        std::string url = "https://raw.githubusercontent.com/" + org + "/" + project + "/" + branch;
        for (size_t i = 2; i < repo_parts.size(); ++i) {
            url += "/" + repo_parts[i];
        }

        return download(url, bn, true);
    }
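
    // Download bucket/key from Amazon S3, authenticating with
    // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY from the environment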
    int s3_dl(const std::string & model, const std::string & bn) {
        const size_t slash_pos = model.find('/');
        if (slash_pos == std::string::npos) {
            return 1;
        }

        const std::string bucket = model.substr(0, slash_pos);
        const std::string key = model.substr(slash_pos + 1);
        const char * access_key = std::getenv("AWS_ACCESS_KEY_ID");
        const char * secret_key = std::getenv("AWS_SECRET_ACCESS_KEY");
        if (!access_key || !secret_key) {
            printe("AWS credentials not found in environment\n");
            return 1;
        }

        // Generate AWS Signature Version 4 headers
        // (Implementation requires HMAC-SHA256 and date handling)
        // Get current timestamp
        const time_t now = time(nullptr);
        const tm tm = *gmtime(&now);
        const std::string date = strftime_fmt("%Y%m%d", tm);
        const std::string datetime = strftime_fmt("%Y%m%dT%H%M%SZ", tm);
        const std::vector<std::string> headers = {
            "Authorization: AWS4-HMAC-SHA256 Credential=" + std::string(access_key) + "/" + date +
                "/us-east-1/s3/aws4_request",
            "x-amz-content-sha256: UNSIGNED-PAYLOAD", "x-amz-date: " + datetime
        };

        const std::string url = "https://" + bucket + ".s3.amazonaws.com/" + key;

        return download(url, bn, true, headers);
    }

    std::string basename(const std::string & path) {
        const size_t pos = path.find_last_of("/\\");
        if (pos == std::string::npos) {
            return path;
        }

        return path.substr(pos + 1);
    }

    int rm_until_substring(std::string & model_, const std::string & substring) {
        const std::string::size_type pos = model_.find(substring);
        if (pos == std::string::npos) {
            return 1;
        }

        model_ = model_.substr(pos + substring.size());  // Skip past the substring
        return 0;
    }
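
    // Map a model reference (file://, hf://, ollama://, github:, s3:// or
    // https://, or a bare name) to a local file, downloading it when it is
    // remote; on return model_ holds the local basename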
    int resolve_model(std::string & model_) {
        int ret = 0;
        if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) {
            rm_until_substring(model_, "://");

            return ret;
        }

        const std::string bn = basename(model_);
        if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://") ||
            string_starts_with(model_, "hf.co/")) {
            rm_until_substring(model_, "hf.co/");
            rm_until_substring(model_, "://");
            ret = huggingface_dl(model_, bn);
        } else if ((string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) &&
                   !string_starts_with(model_, "https://ollama.com/library/")) {
            ret = download(model_, bn, true);
        } else if (string_starts_with(model_, "github:") || string_starts_with(model_, "github://")) {
            rm_until_substring(model_, "github:");
            rm_until_substring(model_, "://");
            ret = github_dl(model_, bn);
        } else if (string_starts_with(model_, "s3://")) {
            rm_until_substring(model_, "://");
            ret = s3_dl(model_, bn);
        } else {  // ollama:// or nothing
            rm_until_substring(model_, "ollama.com/library/");
            rm_until_substring(model_, "://");
            ret = ollama_dl(model_, bn);
        }

        model_ = bn;

        return ret;
    }

    // Initializes the model and returns a unique pointer to it
    llama_model_ptr initialize_model(Opt & opt) {
        ggml_backend_load_all();
        resolve_model(opt.model_);
        printe("\r" LOG_CLR_TO_EOL "Loading model");
        llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
        if (!model) {
            printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
        }

        printe("\r" LOG_CLR_TO_EOL);

        return model;
    }

    // Initializes the context with the specified parameters
    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
        llama_context_ptr context(llama_init_from_model(model.get(), opt.ctx_params));
        if (!context) {
            printe("%s: error: failed to create the llama_context\n", __func__);
        }

        return context;
    }

    // Initializes and configures the sampler
    llama_sampler_ptr initialize_sampler(const Opt & opt) {
        llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

        return sampler;
    }
};

// Add a message to `messages` and store its content in `msg_strs`
static void add_message(const char * role, const std::string & text, LlamaData & llama_data) {
    llama_data.msg_strs.push_back(text);
    llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() });
}

// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(const struct common_chat_templates * tmpls, LlamaData & llama_data, const bool append,
                               bool use_jinja) {
    common_chat_templates_inputs inputs;
    for (const auto & msg : llama_data.messages) {
        common_chat_msg cmsg;
        cmsg.role = msg.role;
        cmsg.content = msg.content;
        inputs.messages.push_back(cmsg);
    }
    inputs.add_generation_prompt = append;
    inputs.use_jinja = use_jinja;

    auto chat_params = common_chat_templates_apply(tmpls, inputs);
    // TODO: use other params for tool calls.
    auto result = chat_params.prompt;
    llama_data.fmtted.resize(result.size() + 1);
    memcpy(llama_data.fmtted.data(), result.c_str(), result.size() + 1);
    return result.size();
}

// Function to tokenize the prompt
static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                           std::vector<llama_token> & prompt_tokens, const LlamaData & llama_data) {
    const bool is_first = llama_kv_self_used_cells(llama_data.context.get()) == 0;

    const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
    prompt_tokens.resize(n_prompt_tokens);
    if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), is_first,
                       true) < 0) {
        printe("failed to tokenize the prompt\n");
        return -1;
    }

    return n_prompt_tokens;
}

// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
    const int n_ctx = llama_n_ctx(ctx.get());
    const int n_ctx_used = llama_kv_self_used_cells(ctx.get());
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        printf(LOG_COL_DEFAULT "\n");
        printe("context size exceeded\n");
        return 1;
    }

    return 0;
}

// Convert the token to a string piece
static int convert_token_to_string(const llama_vocab * vocab, const llama_token token_id, std::string & piece) {
    char buf[256];
    int n = llama_token_to_piece(vocab, token_id, buf, sizeof(buf), 0, true);
    if (n < 0) {
        printe("failed to convert token to piece\n");
        return 1;
    }

    piece = std::string(buf, n);
    return 0;
}

static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) {
    printf("%s", piece.c_str());
    fflush(stdout);
    response += piece;
}

// Helper function to evaluate a prompt and generate a response
static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
    const llama_vocab * vocab = llama_model_get_vocab(llama_data.model.get());

    std::vector<llama_token> tokens;
    if (tokenize_prompt(vocab, prompt, tokens, llama_data) < 0) {
        return 1;
    }

    // prepare a batch for the prompt
    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
    llama_token new_token_id;
    while (true) {
        // stop instead of decoding past the context window
        if (check_context_size(llama_data.context, batch)) {
            return 1;
        }

        if (llama_decode(llama_data.context.get(), batch)) {
            printe("failed to decode\n");
            return 1;
        }

        // sample the next token and check whether it ends the generation
        new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1);
        if (llama_vocab_is_eog(vocab, new_token_id)) {
            break;
        }

        std::string piece;
        if (convert_token_to_string(vocab, new_token_id, piece)) {
            return 1;
        }

        print_word_and_concatenate_to_response(piece, response);

        // prepare the next batch with the sampled token
        batch = llama_batch_get_one(&new_token_id, 1);
    }

    printf(LOG_COL_DEFAULT);
    return 0;
}
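
// Read a line of user input, via linenoise when interactive on POSIX;
// returns 0 on input, 1 on EOF or "/bye", 2 on an empty line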
static int read_user_input(std::string & user_input) {
    static const char * prompt_prefix_env = std::getenv("LLAMA_PROMPT_PREFIX");
    static const char * prompt_prefix = prompt_prefix_env ? prompt_prefix_env : "> ";
#ifdef _WIN32
    printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);

    std::getline(std::cin, user_input);
    if (std::cin.eof()) {
        printf("\n");
        return 1;
    }
#else
    std::unique_ptr<char, decltype(&std::free)> line(const_cast<char *>(linenoise(prompt_prefix)), free);
    if (!line) {
        return 1;
    }

    user_input = line.get();
#endif

    if (user_input == "/bye") {
        return 1;
    }

    if (user_input.empty()) {
        return 2;
    }

#ifndef _WIN32
    linenoiseHistoryAdd(line.get());
#endif

    return 0;  // Should have data in happy path
}

// Function to generate a response based on the prompt
static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response,
                             const bool stdout_a_terminal) {
    // Set response color
    if (stdout_a_terminal) {
        printf(LOG_COL_YELLOW);
    }

    if (generate(llama_data, prompt, response)) {
        printe("failed to generate response\n");
        return 1;
    }

    // End response with color reset and newline
    printf("\n%s", stdout_a_terminal ? LOG_COL_DEFAULT : "");
    return 0;
}

// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(const common_chat_templates * tmpls, LlamaData & llama_data,
                                                   const bool append, int & output_length, bool use_jinja) {
    const int new_len = apply_chat_template(tmpls, llama_data, append, use_jinja);
    if (new_len < 0) {
        printe("failed to apply the chat template\n");
        return -1;
    }

    output_length = new_len;
    return 0;
}

// Helper function to handle user input
static int handle_user_input(std::string & user_input, const std::string & user) {
    if (!user.empty()) {
        user_input = user;
        return 0;  // No need for interactive input
    }

    return read_user_input(user_input);  // Returns 1 to end the loop, 2 to re-prompt
}
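
// Detect whether stdin/stdout are attached to a terminal, so prompts and
// colors are only emitted interactively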
static bool is_stdin_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdin, &mode);
#else
    return isatty(STDIN_FILENO);
#endif
}

static bool is_stdout_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdout, &mode);
#else
    return isatty(STDOUT_FILENO);
#endif
}

// Function to get user input, retrying while the input is empty
static int get_user_input(std::string & user_input, const std::string & user) {
    while (true) {
        const int ret = handle_user_input(user_input, user);
        if (ret == 1) {
            return 1;
        }

        if (ret == 2) {
            continue;
        }

        break;
    }

    return 0;
}

// Reads a chat template file to be used
static std::string read_chat_template_file(const std::string & chat_template_file) {
    File file;
    if (!file.open(chat_template_file, "r")) {
        printe("Error opening chat template file '%s': %s", chat_template_file.c_str(), strerror(errno));
        return "";
    }

    return file.to_string();
}

static int process_user_message(const Opt & opt, const std::string & user_input, LlamaData & llama_data,
                                const common_chat_templates_ptr & chat_templates, int & prev_len,
                                const bool stdout_a_terminal) {
    add_message("user", opt.user.empty() ? user_input : opt.user, llama_data);
    int new_len;
    if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, true, new_len, opt.use_jinja) < 0) {
        return 1;
    }

    std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len);
    std::string response;
    if (generate_response(llama_data, prompt, response, stdout_a_terminal)) {
        return 1;
    }

    if (!opt.user.empty()) {
        return 2;
    }

    add_message("assistant", response, llama_data);
    if (apply_chat_template_with_error_handling(chat_templates.get(), llama_data, false, prev_len, opt.use_jinja) < 0) {
        return 1;
    }

    return 0;
}

// Main chat loop function
static int chat_loop(LlamaData & llama_data, const Opt & opt) {
    int prev_len = 0;
    llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
    std::string chat_template;
    if (!opt.chat_template_file.empty()) {
        chat_template = read_chat_template_file(opt.chat_template_file);
    }

    common_chat_templates_ptr chat_templates = common_chat_templates_init(llama_data.model.get(), chat_template);
    static const bool stdout_a_terminal = is_stdout_a_terminal();
    while (true) {
        // Get user input
        std::string user_input;
        if (get_user_input(user_input, opt.user) == 1) {
            return 0;
        }

        const int ret = process_user_message(opt, user_input, llama_data, chat_templates, prev_len, stdout_a_terminal);
        if (ret == 1) {
            return 1;
        } else if (ret == 2) {
            break;
        }
    }

    return 0;
}

static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
    const Opt * opt = static_cast<Opt *>(p);
    if (opt->verbose || level == GGML_LOG_LEVEL_ERROR) {
        printe("%s", text);
    }
}

static std::string read_pipe_data() {
    std::ostringstream result;
    result << std::cin.rdbuf();  // Read all data from std::cin

    return result.str();
}
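
// Install the Ctrl-C handler: sigaction on POSIX, a console control
// handler on Windows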
static void ctrl_c_handling() {
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = sigint_handler;
    sigemptyset(&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined(_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
}

int main(int argc, const char ** argv) {
    ctrl_c_handling();
    Opt opt;
    const int ret = opt.init(argc, argv);
    if (ret == 2) {
        return 0;
    } else if (ret) {
        return 1;
    }

    if (!is_stdin_a_terminal()) {
        if (!opt.user.empty()) {
            opt.user += "\n\n";
        }

        opt.user += read_pipe_data();
    }

    llama_log_set(log_callback, &opt);
    LlamaData llama_data;
    if (llama_data.init(opt)) {
        return 1;
    }

    if (chat_loop(llama_data, opt)) {
        return 1;
    }

    return 0;
}