
run.cpp 33 KB
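// llama-run: a minimal CLI chat example for llama.cpp. It resolves a model
// reference (local file, Hugging Face, Ollama registry, or plain URL), loads
// the model, and runs a one-shot or interactive chat session.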

#if defined(_WIN32)
#    include <windows.h>
#    include <io.h>
#else
#    include <sys/file.h>
#    include <sys/ioctl.h>
#    include <unistd.h>
#endif
#if defined(LLAMA_USE_CURL)
#    include <curl/curl.h>
#endif

#include <signal.h>

#include <chrono>  // needed explicitly for progress_data's steady_clock members
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <filesystem>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include "common.h"
#include "json.hpp"
#include "llama-cpp.h"

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32)
[[noreturn]] static void sigint_handler(int) {
    printf("\n");
    exit(0);  // not ideal, but it's the only way to guarantee exit in all cases
}
#endif
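// printf-style helpers: fmt() formats into a std::string (two vsnprintf passes,
// one to measure and one to write), printe() is printf to stderr.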
GGML_ATTRIBUTE_FORMAT(1, 2)
static std::string fmt(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    const int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX);  // NOLINT
    std::string buf;
    buf.resize(size);
    const int size2 = vsnprintf(const_cast<char *>(buf.data()), buf.size() + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);

    return buf;
}

GGML_ATTRIBUTE_FORMAT(1, 2)
static int printe(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    const int ret = vfprintf(stderr, fmt, args);
    va_end(args);

    return ret;
}
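// Command-line state: parses option flags (-c/--context-size, -n/--ngl, --temp,
// -v/--verbose, -h/--help, and "--" to end option parsing) followed by two
// positional arguments: the model reference and an optional prompt.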
class Opt {
  public:
    int init(int argc, const char ** argv) {
        ctx_params = llama_context_default_params();
        model_params = llama_model_default_params();
        context_size_default = ctx_params.n_batch;
        ngl_default = model_params.n_gpu_layers;
        common_params_sampling sampling;
        temperature_default = sampling.temp;

        if (argc < 2) {
            printe("Error: No arguments provided.\n");
            print_help();
            return 1;
        }

        // Parse arguments
        if (parse(argc, argv)) {
            printe("Error: Failed to parse arguments.\n");
            print_help();
            return 1;
        }

        // If help is requested, show help and exit
        if (help) {
            print_help();
            return 2;
        }

        ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default;
        ctx_params.n_ctx = ctx_params.n_batch;
        model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
        temperature = temperature >= 0 ? temperature : temperature_default;

        return 0;  // Success
    }

    llama_context_params ctx_params;
    llama_model_params model_params;
    std::string model_;
    std::string user;
    int context_size = -1, ngl = -1;
    float temperature = -1;
    bool verbose = false;

  private:
    int context_size_default = -1, ngl_default = -1;
    float temperature_default = -1;
    bool help = false;

    bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
        return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atoi(argv[++i]);

        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atof(argv[++i]);

        return 0;
    }

    int parse(int argc, const char ** argv) {
        bool options_parsing = true;
        for (int i = 1, positional_args_i = 0; i < argc; ++i) {
            if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) {
                if (handle_option_with_value(argc, argv, i, context_size) == 1) {
                    return 1;
                }
            } else if (options_parsing && (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0)) {
                if (handle_option_with_value(argc, argv, i, ngl) == 1) {
                    return 1;
                }
            } else if (options_parsing && strcmp(argv[i], "--temp") == 0) {
                if (handle_option_with_value(argc, argv, i, temperature) == 1) {
                    return 1;
                }
            } else if (options_parsing &&
                       (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
                verbose = true;
            } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) {
                help = true;
                return 0;
            } else if (options_parsing && strcmp(argv[i], "--") == 0) {
                options_parsing = false;
            } else if (positional_args_i == 0) {
                if (!argv[i][0] || argv[i][0] == '-') {
                    return 1;
                }

                ++positional_args_i;
                model_ = argv[i];
            } else if (positional_args_i == 1) {
                ++positional_args_i;
                user = argv[i];
            } else {
                user += " " + std::string(argv[i]);
            }
        }

        return 0;
    }
    void print_help() const {
        printf(
            "Description:\n"
            "  Runs an LLM\n"
            "\n"
            "Usage:\n"
            "  llama-run [options] model [prompt]\n"
            "\n"
            "Options:\n"
            "  -c, --context-size <value>\n"
            "      Context size (default: %d)\n"
            "  -n, --ngl <value>\n"
            "      Number of GPU layers (default: %d)\n"
            "  --temp <value>\n"
            "      Temperature (default: %.1f)\n"
            "  -v, --verbose, --log-verbose\n"
            "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
            "  -h, --help\n"
            "      Show help message\n"
            "\n"
            "Commands:\n"
            "  model\n"
            "      Model is a string with an optional prefix of \n"
            "      huggingface:// (hf://), ollama://, https:// or file://.\n"
            "      If no protocol is specified and a file exists in the specified\n"
            "      path, file:// is assumed, otherwise if a file does not exist in\n"
            "      the specified path, ollama:// is assumed. Models that are being\n"
            "      pulled are downloaded with .partial extension while being\n"
            "      downloaded and then renamed as the file without the .partial\n"
            "      extension when complete.\n"
            "\n"
            "Examples:\n"
            "  llama-run llama3\n"
            "  llama-run ollama://granite-code\n"
            "  llama-run ollama://smollm:135m\n"
            "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run https://example.com/some-file1.gguf\n"
            "  llama-run some-file2.gguf\n"
            "  llama-run file://some-file3.gguf\n"
            "  llama-run --ngl 999 some-file4.gguf\n"
            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
            context_size_default, ngl_default, temperature_default);
    }
};
struct progress_data {
    size_t file_size = 0;
    std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
    bool printed = false;
};

static int get_terminal_width() {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO csbi;
    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
    struct winsize w;
    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
    return w.ws_col;
#endif
}

#ifdef LLAMA_USE_CURL
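// RAII wrapper over FILE * that can take an exclusive advisory lock
// (LockFileEx on Windows, flock elsewhere) so concurrent runs do not append to
// the same .partial download; the lock and the handle are released in the destructor.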
class File {
  public:
    FILE * file = nullptr;

    FILE * open(const std::string & filename, const char * mode) {
        file = fopen(filename.c_str(), mode);

        return file;
    }

    int lock() {
        if (file) {
# ifdef _WIN32
            fd = _fileno(file);
            hFile = (HANDLE) _get_osfhandle(fd);
            if (hFile == INVALID_HANDLE_VALUE) {
                fd = -1;

                return 1;
            }

            OVERLAPPED overlapped = {};
            if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD,
                            &overlapped)) {
                fd = -1;

                return 1;
            }
# else
            fd = fileno(file);
            if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
                fd = -1;

                return 1;
            }
# endif
        }

        return 0;
    }

    ~File() {
        if (fd >= 0) {
# ifdef _WIN32
            if (hFile != INVALID_HANDLE_VALUE) {
                OVERLAPPED overlapped = {};
                UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped);
            }
# else
            flock(fd, LOCK_UN);
# endif
        }

        if (file) {
            fclose(file);
        }
    }

  private:
    int fd = -1;
# ifdef _WIN32
    HANDLE hFile = nullptr;
# endif
};
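// Thin libcurl wrapper: downloads a URL either into a file (appending to a
// .partial and resuming via CURLOPT_RESUME_FROM_LARGE, with an optional
// progress bar on stderr) or into an in-memory string for small responses
// such as registry manifests.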
class HttpClient {
  public:
    int init(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
             const bool progress, std::string * response_str = nullptr) {
        std::string output_file_partial;
        curl = curl_easy_init();
        if (!curl) {
            return 1;
        }

        progress_data data;
        File out;
        if (!output_file.empty()) {
            output_file_partial = output_file + ".partial";
            if (!out.open(output_file_partial, "ab")) {
                printe("Failed to open file\n");
                return 1;
            }

            if (out.lock()) {
                printe("Failed to exclusively lock file\n");
                return 1;
            }
        }

        set_write_options(response_str, out);
        data.file_size = set_resume_point(output_file_partial);
        set_progress_options(progress, data);
        set_headers(headers);
        if (perform(url) != CURLE_OK) {
            return 1;  // keep the .partial file so a failed transfer can be resumed later
        }

        if (!output_file.empty()) {
            std::filesystem::rename(output_file_partial, output_file);
        }

        return 0;
    }
    ~HttpClient() {
        if (chunk) {
            curl_slist_free_all(chunk);
        }

        if (curl) {
            curl_easy_cleanup(curl);
        }
    }

  private:
    CURL * curl = nullptr;
    struct curl_slist * chunk = nullptr;

    void set_write_options(std::string * response_str, const File & out) {
        if (response_str) {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, capture_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str);
        } else {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file);
        }
    }

    size_t set_resume_point(const std::string & output_file) {
        size_t file_size = 0;
        if (std::filesystem::exists(output_file)) {
            file_size = std::filesystem::file_size(output_file);
            curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast<curl_off_t>(file_size));
        }

        return file_size;
    }

    void set_progress_options(bool progress, progress_data & data) {
        if (progress) {
            curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
            curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data);
            curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress);
        }
    }

    void set_headers(const std::vector<std::string> & headers) {
        if (!headers.empty()) {
            if (chunk) {
                curl_slist_free_all(chunk);
                chunk = nullptr;
            }

            for (const auto & header : headers) {
                chunk = curl_slist_append(chunk, header.c_str());
            }

            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
        }
    }
    // Returns the CURLcode so callers can avoid finalizing a failed transfer
    CURLcode perform(const std::string & url) {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
        const CURLcode res = curl_easy_perform(curl);
        if (res != CURLE_OK) {
            printe("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
        }

        return res;
    }
    static std::string human_readable_time(double seconds) {
        int hrs = static_cast<int>(seconds) / 3600;
        int mins = (static_cast<int>(seconds) % 3600) / 60;
        int secs = static_cast<int>(seconds) % 60;

        if (hrs > 0) {
            return fmt("%dh %02dm %02ds", hrs, mins, secs);
        } else if (mins > 0) {
            return fmt("%dm %02ds", mins, secs);
        } else {
            return fmt("%ds", secs);
        }
    }

    static std::string human_readable_size(curl_off_t size) {
        static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
        const int length = sizeof(suffix) / sizeof(suffix[0]);
        int i = 0;
        double dbl_size = size;
        if (size > 1024) {
            for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                dbl_size = size / 1024.0;
            }
        }

        return fmt("%.2f %s", dbl_size, suffix[i]);
    }

    static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t,
                               curl_off_t) {
        progress_data * data = static_cast<progress_data *>(ptr);
        if (total_to_download <= 0) {
            return 0;
        }

        total_to_download += data->file_size;
        const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
        std::string progress_prefix = generate_progress_prefix(percentage);

        const double speed = calculate_speed(now_downloaded, data->start_time);
        const double tim = (total_to_download - now_downloaded) / speed;
        std::string progress_suffix =
            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim);

        int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
        std::string progress_bar;
        generate_progress_bar(progress_bar_width, percentage, progress_bar);

        print_progress(progress_prefix, progress_bar, progress_suffix);
        data->printed = true;

        return 0;
    }

    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
        return (now_downloaded_plus_file_size * 100) / total_to_download;
    }

    static std::string generate_progress_prefix(curl_off_t percentage) {
        return fmt("%3ld%% |", static_cast<long int>(percentage));
    }

    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
        const auto now = std::chrono::steady_clock::now();
        const std::chrono::duration<double> elapsed_seconds = now - start_time;
        return now_downloaded / elapsed_seconds.count();
    }

    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
                                                double speed, double estimated_time) {
        const int width = 10;
        return fmt("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(), width,
                   human_readable_size(total_to_download).c_str(), width, human_readable_size(speed).c_str(), width,
                   human_readable_time(estimated_time).c_str());
    }

    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3;
        if (progress_bar_width < 1) {
            progress_bar_width = 1;
        }

        return progress_bar_width;
    }

    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
                                             std::string & progress_bar) {
        const curl_off_t pos = (percentage * progress_bar_width) / 100;
        for (int i = 0; i < progress_bar_width; ++i) {
            progress_bar.append((i < pos) ? "█" : " ");
        }

        return progress_bar;
    }

    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                               const std::string & progress_suffix) {
        printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(),
               progress_suffix.c_str());
    }

    // Function to write data to a file
    static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        FILE * out = static_cast<FILE *>(stream);
        return fwrite(ptr, size, nmemb, out);
    }

    // Function to capture data into a string
    static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        std::string * str = static_cast<std::string *>(stream);
        str->append(static_cast<char *>(ptr), size * nmemb);
        return size * nmemb;
    }
};
#endif
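// Holds the model, context and sampler for one session, and resolves the model
// argument (file://, hf://, huggingface://, ollama://, https://, or a bare name
// that defaults to the Ollama registry) to a local GGUF file, downloading it
// when needed.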
class LlamaData {
  public:
    llama_model_ptr model;
    llama_sampler_ptr sampler;
    llama_context_ptr context;
    std::vector<llama_chat_message> messages;
    std::vector<std::string> msg_strs;
    std::vector<char> fmtted;

    int init(Opt & opt) {
        model = initialize_model(opt);
        if (!model) {
            return 1;
        }

        context = initialize_context(model, opt);
        if (!context) {
            return 1;
        }

        sampler = initialize_sampler(opt);

        return 0;
    }

  private:
#ifdef LLAMA_USE_CURL
    int download(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
                 const bool progress, std::string * response_str = nullptr) {
        HttpClient http;
        if (http.init(url, headers, output_file, progress, response_str)) {
            return 1;
        }

        return 0;
    }
#else
    int download(const std::string &, const std::vector<std::string> &, const std::string &, const bool,
                 std::string * = nullptr) {
        printe("%s: llama.cpp built without libcurl, downloading from a URL is not supported\n", __func__);

        return 1;
    }
#endif
    int huggingface_dl(const std::string & model, const std::vector<std::string> & headers, const std::string & bn) {
        // Find the second occurrence of '/' after protocol string
        size_t pos = model.find('/');
        pos = model.find('/', pos + 1);
        if (pos == std::string::npos) {
            return 1;
        }

        const std::string hfr = model.substr(0, pos);
        const std::string hff = model.substr(pos + 1);
        const std::string url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff;
        return download(url, headers, bn, true);
    }
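    // The Ollama registry speaks the OCI distribution protocol: fetch the tag's
    // manifest as JSON, then download the layer that carries the model weights.
    // Illustrative (abridged) manifest shape, not captured from a real response:
    //   { "layers": [ { "mediaType": "application/vnd.ollama.image.model",
    //                   "digest": "sha256:...", "size": 1234 }, ... ] }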
    int ollama_dl(std::string & model, const std::vector<std::string> & headers, const std::string & bn) {
        if (model.find('/') == std::string::npos) {
            model = "library/" + model;
        }

        std::string model_tag = "latest";
        size_t colon_pos = model.find(':');
        if (colon_pos != std::string::npos) {
            model_tag = model.substr(colon_pos + 1);
            model = model.substr(0, colon_pos);
        }

        std::string manifest_url = "https://registry.ollama.ai/v2/" + model + "/manifests/" + model_tag;
        std::string manifest_str;
        const int ret = download(manifest_url, headers, "", false, &manifest_str);
        if (ret) {
            return ret;
        }

        nlohmann::json manifest = nlohmann::json::parse(manifest_str);
        std::string layer;
        for (const auto & l : manifest["layers"]) {
            if (l["mediaType"] == "application/vnd.ollama.image.model") {
                layer = l["digest"];
                break;
            }
        }

        if (layer.empty()) {
            printe("Failed to find a model layer in the manifest\n");
            return 1;
        }

        std::string blob_url = "https://registry.ollama.ai/v2/" + model + "/blobs/" + layer;
        return download(blob_url, headers, bn, true);
    }
    std::string basename(const std::string & path) {
        const size_t pos = path.find_last_of("/\\");
        if (pos == std::string::npos) {
            return path;
        }

        return path.substr(pos + 1);
    }

    int remove_proto(std::string & model_) {
        const std::string::size_type pos = model_.find("://");
        if (pos == std::string::npos) {
            return 1;
        }

        model_ = model_.substr(pos + 3);  // Skip past "://"
        return 0;
    }
    int resolve_model(std::string & model_) {
        int ret = 0;
        if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) {
            remove_proto(model_);

            return ret;
        }

        const std::string bn = basename(model_);
        const std::vector<std::string> headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" };
        if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://")) {
            remove_proto(model_);
            ret = huggingface_dl(model_, headers, bn);
        } else if (string_starts_with(model_, "ollama://")) {
            remove_proto(model_);
            ret = ollama_dl(model_, headers, bn);
        } else if (string_starts_with(model_, "https://")) {
            ret = download(model_, headers, bn, true);
        } else {
            ret = ollama_dl(model_, headers, bn);
        }

        model_ = bn;
        return ret;
    }
    // Initializes the model and returns a unique pointer to it
    llama_model_ptr initialize_model(Opt & opt) {
        ggml_backend_load_all();
        resolve_model(opt.model_);
        printe(
            "\r%*s"
            "\rLoading model",
            get_terminal_width(), " ");
        llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
        if (!model) {
            printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
        }

        printe("\r%*s\r", static_cast<int>(sizeof("Loading model")), " ");
        return model;
    }

    // Initializes the context with the specified parameters
    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
        llama_context_ptr context(llama_init_from_model(model.get(), opt.ctx_params));
        if (!context) {
            printe("%s: error: failed to create the llama_context\n", __func__);
        }

        return context;
    }

    // Initializes and configures the sampler
    llama_sampler_ptr initialize_sampler(const Opt & opt) {
        llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

        return sampler;
    }
};
// Add a message to `messages` and store its content in `msg_strs`
static void add_message(const char * role, const std::string & text, LlamaData & llama_data) {
    llama_data.msg_strs.push_back(text);  // std::move on a const reference would silently copy anyway
    llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() });
}
// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(LlamaData & llama_data, const bool append) {
    int result = llama_chat_apply_template(
        llama_model_chat_template(llama_data.model.get()), llama_data.messages.data(), llama_data.messages.size(),
        append, append ? llama_data.fmtted.data() : nullptr, append ? llama_data.fmtted.size() : 0);
    if (append && result > static_cast<int>(llama_data.fmtted.size())) {
        llama_data.fmtted.resize(result);
        result = llama_chat_apply_template(llama_model_chat_template(llama_data.model.get()), llama_data.messages.data(),
                                           llama_data.messages.size(), append, llama_data.fmtted.data(),
                                           llama_data.fmtted.size());
    }

    return result;
}
// Function to tokenize the prompt
static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                           std::vector<llama_token> & prompt_tokens) {
    const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, true, true);
    prompt_tokens.resize(n_prompt_tokens);
    if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true,
                       true) < 0) {
        printe("failed to tokenize the prompt\n");
        return -1;
    }

    return n_prompt_tokens;
}
// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
    const int n_ctx = llama_n_ctx(ctx.get());
    const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get());
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        printf("\033[0m\n");
        printe("context size exceeded\n");
        return 1;
    }

    return 0;
}
// convert the token to a string
static int convert_token_to_string(const llama_vocab * vocab, const llama_token token_id, std::string & piece) {
    char buf[256];
    int n = llama_token_to_piece(vocab, token_id, buf, sizeof(buf), 0, true);
    if (n < 0) {
        printe("failed to convert token to piece\n");
        return 1;
    }

    piece = std::string(buf, n);
    return 0;
}

static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) {
    printf("%s", piece.c_str());
    fflush(stdout);
    response += piece;
}
// helper function to evaluate a prompt and generate a response
static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
    const llama_vocab * vocab = llama_model_get_vocab(llama_data.model.get());

    std::vector<llama_token> tokens;
    if (tokenize_prompt(vocab, prompt, tokens) < 0) {
        return 1;
    }

    // prepare a batch for the prompt
    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
    llama_token new_token_id;
    while (true) {
        if (check_context_size(llama_data.context, batch)) {
            return 1;  // stop instead of decoding past the context window
        }

        if (llama_decode(llama_data.context.get(), batch)) {
            printe("failed to decode\n");
            return 1;
        }

        // sample the next token and check whether it ends the generation
        new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1);
        if (llama_vocab_is_eog(vocab, new_token_id)) {
            break;
        }

        std::string piece;
        if (convert_token_to_string(vocab, new_token_id, piece)) {
            return 1;
        }

        print_word_and_concatenate_to_response(piece, response);

        // prepare the next batch with the sampled token
        batch = llama_batch_get_one(&new_token_id, 1);
    }

    return 0;
}
static int read_user_input(std::string & user) {
    std::getline(std::cin, user);
    if (std::cin.eof()) {
        printf("\n");
        return 1;
    }

    if (user == "/bye") {
        return 1;
    }

    if (user.empty()) {
        return 2;
    }

    return 0;  // Should have data in happy path
}
// Function to generate a response based on the prompt
static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response,
                             const bool stdout_a_terminal) {
    // Set response color
    if (stdout_a_terminal) {
        printf("\033[33m");
    }

    if (generate(llama_data, prompt, response)) {
        printe("failed to generate response\n");
        return 1;
    }

    // End response with color reset and newline
    printf("\n%s", stdout_a_terminal ? "\033[0m" : "");
    return 0;
}

// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(LlamaData & llama_data, const bool append, int & output_length) {
    const int new_len = apply_chat_template(llama_data, append);
    if (new_len < 0) {
        printe("failed to apply the chat template\n");
        return -1;
    }

    output_length = new_len;
    return 0;
}
// Helper function to handle user input
static int handle_user_input(std::string & user_input, const std::string & user) {
    if (!user.empty()) {
        user_input = user;
        return 0;  // No need for interactive input
    }

    printf(
        "\r%*s"
        "\r\033[32m> \033[0m",
        get_terminal_width(), " ");
    return read_user_input(user_input);  // 1 ends the loop, 2 means retry on empty input
}
static bool is_stdin_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdin, &mode);
#else
    return isatty(STDIN_FILENO);
#endif
}

static bool is_stdout_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdout, &mode);
#else
    return isatty(STDOUT_FILENO);
#endif
}
// Keep prompting until we get usable input or the user ends the session
static int get_user_input(std::string & user_input, const std::string & user) {
    while (true) {
        const int ret = handle_user_input(user_input, user);
        if (ret == 1) {
            return 1;
        }

        if (ret == 2) {
            continue;
        }

        break;
    }

    return 0;
}
// Main chat loop function
static int chat_loop(LlamaData & llama_data, const std::string & user) {
    int prev_len = 0;
    llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
    static const bool stdout_a_terminal = is_stdout_a_terminal();
    while (true) {
        // Get user input
        std::string user_input;
        if (get_user_input(user_input, user) == 1) {
            return 0;
        }

        add_message("user", user.empty() ? user_input : user, llama_data);
        int new_len;
        if (apply_chat_template_with_error_handling(llama_data, true, new_len) < 0) {
            return 1;
        }

        std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len);
        std::string response;
        if (generate_response(llama_data, prompt, response, stdout_a_terminal)) {
            return 1;
        }

        if (!user.empty()) {
            break;
        }

        add_message("assistant", response, llama_data);
        if (apply_chat_template_with_error_handling(llama_data, false, prev_len) < 0) {
            return 1;
        }
    }

    return 0;
}
static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
    const Opt * opt = static_cast<Opt *>(p);
    if (opt->verbose || level == GGML_LOG_LEVEL_ERROR) {
        printe("%s", text);
    }
}

static std::string read_pipe_data() {
    std::ostringstream result;
    result << std::cin.rdbuf();  // Read all data from std::cin
    return result.str();
}
static void ctrl_c_handling() {
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = sigint_handler;
    sigemptyset(&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined(_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
}
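// Entry point: install the Ctrl+C handler, parse options, append piped stdin
// to the prompt if stdin is not a terminal, load the model, and run the chat loop.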
int main(int argc, const char ** argv) {
    ctrl_c_handling();
    Opt opt;
    const int ret = opt.init(argc, argv);
    if (ret == 2) {
        return 0;
    } else if (ret) {
        return 1;
    }

    if (!is_stdin_a_terminal()) {
        if (!opt.user.empty()) {
            opt.user += "\n\n";
        }

        opt.user += read_pipe_data();
    }

    llama_log_set(log_callback, &opt);
    LlamaData llama_data;
    if (llama_data.init(opt)) {
        return 1;
    }

    if (chat_loop(llama_data, opt.user)) {
        return 1;
    }

    return 0;
}