#if defined(_WIN32)
# include <windows.h>
#else
# include <sys/file.h>
# include <sys/ioctl.h>
# include <unistd.h>
#endif
#if defined(LLAMA_USE_CURL)
# include <curl/curl.h>
#endif

#include <chrono>  // needed for progress_data's std::chrono::steady_clock timestamps
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <filesystem>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include "common.h"
#include "json.hpp"
#include "llama-cpp.h"
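
// printf-style formatting into a std::string. vsnprintf is called twice: first
// with a NULL buffer to measure the required size, then again to fill the
// buffer. The va_copy is required because the first vsnprintf consumes `ap`.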
GGML_ATTRIBUTE_FORMAT(1, 2)
static std::string fmt(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    const int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX);  // NOLINT
    std::string buf;
    buf.resize(size);
    const int size2 = vsnprintf(const_cast<char *>(buf.data()), buf.size() + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);

    return buf;
}
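
// fprintf to stderr; all diagnostics go through this so that generated text on
// stdout stays clean (and pipeable).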
GGML_ATTRIBUTE_FORMAT(1, 2)
static int printe(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    const int ret = vfprintf(stderr, fmt, args);
    va_end(args);

    return ret;
}
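
// Command-line option parsing. Defaults are captured from llama.cpp's own
// default params, so a value of -1 means "not set by the user".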
class Opt {
  public:
    int init(int argc, const char ** argv) {
        ctx_params = llama_context_default_params();
        model_params = llama_model_default_params();
        context_size_default = ctx_params.n_batch;
        ngl_default = model_params.n_gpu_layers;
        common_params_sampling sampling;
        temperature_default = sampling.temp;

        if (argc < 2) {
            printe("Error: No arguments provided.\n");
            print_help();
            return 1;
        }

        // Parse arguments
        if (parse(argc, argv)) {
            printe("Error: Failed to parse arguments.\n");
            print_help();
            return 1;
        }

        // If help is requested, show help and exit
        if (help) {
            print_help();
            return 2;
        }

        ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default;
        model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
        temperature = temperature >= 0 ? temperature : temperature_default;

        return 0;  // Success
    }

    llama_context_params ctx_params;
    llama_model_params model_params;
    std::string model_;
    std::string user;
    int context_size = -1, ngl = -1;
    float temperature = -1;
    bool verbose = false;

  private:
    int context_size_default = -1, ngl_default = -1;
    float temperature_default = -1;
    bool help = false;

    bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
        return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atoi(argv[++i]);

        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atof(argv[++i]);

        return 0;
    }

    int parse(int argc, const char ** argv) {
        bool options_parsing = true;
        for (int i = 1, positional_args_i = 0; i < argc; ++i) {
            if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) {
                if (handle_option_with_value(argc, argv, i, context_size) == 1) {
                    return 1;
                }
            } else if (options_parsing && (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0)) {
                if (handle_option_with_value(argc, argv, i, ngl) == 1) {
                    return 1;
                }
            } else if (options_parsing && strcmp(argv[i], "--temp") == 0) {
                if (handle_option_with_value(argc, argv, i, temperature) == 1) {
                    return 1;
                }
            } else if (options_parsing &&
                       (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
                verbose = true;
            } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) {
                help = true;
                return 0;
            } else if (options_parsing && strcmp(argv[i], "--") == 0) {
                options_parsing = false;
            } else if (positional_args_i == 0) {
                if (!argv[i][0] || argv[i][0] == '-') {
                    return 1;
                }

                ++positional_args_i;
                model_ = argv[i];
            } else if (positional_args_i == 1) {
                ++positional_args_i;
                user = argv[i];
            } else {
                user += " " + std::string(argv[i]);
            }
        }

        return 0;
    }

    void print_help() const {
        printf(
            "Description:\n"
            "  Runs an LLM\n"
            "\n"
            "Usage:\n"
            "  llama-run [options] model [prompt]\n"
            "\n"
            "Options:\n"
            "  -c, --context-size <value>\n"
            "      Context size (default: %d)\n"
            "  -n, --ngl <value>\n"
            "      Number of GPU layers (default: %d)\n"
            "  --temp <value>\n"
            "      Temperature (default: %.1f)\n"
            "  -v, --verbose, --log-verbose\n"
            "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
            "  -h, --help\n"
            "      Show help message\n"
            "\n"
            "Commands:\n"
            "  model\n"
            "      Model is a string with an optional prefix of \n"
            "      huggingface:// (hf://), ollama://, https:// or file://.\n"
            "      If no protocol is specified and a file exists in the specified\n"
            "      path, file:// is assumed, otherwise if a file does not exist in\n"
            "      the specified path, ollama:// is assumed. Models that are being\n"
            "      pulled are downloaded with .partial extension while being\n"
            "      downloaded and then renamed as the file without the .partial\n"
            "      extension when complete.\n"
            "\n"
            "Examples:\n"
            "  llama-run llama3\n"
            "  llama-run ollama://granite-code\n"
            "  llama-run ollama://smollm:135m\n"
            "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run https://example.com/some-file1.gguf\n"
            "  llama-run some-file2.gguf\n"
            "  llama-run file://some-file3.gguf\n"
            "  llama-run --ngl 999 some-file4.gguf\n"
            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
            context_size_default, ngl_default, temperature_default);
    }
};
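
// State shared with the curl progress callback: the number of bytes already on
// disk (for resumed downloads) and the transfer start time used to compute the
// average speed.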
struct progress_data {
    size_t file_size = 0;
    std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
    bool printed = false;
};
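
// Query the current terminal width so progress output can fill exactly one line.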
static int get_terminal_width() {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO csbi;
    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
    struct winsize w;
    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
    return w.ws_col;
#endif
}

#ifdef LLAMA_USE_CURL
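// RAII wrapper around a FILE * that can also hold an exclusive advisory lock
// (LockFileEx on Windows, flock elsewhere), so that two processes do not write
// to the same .partial download at once. The destructor releases the lock and
// closes the file.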
class File {
  public:
    FILE * file = nullptr;

    FILE * open(const std::string & filename, const char * mode) {
        file = fopen(filename.c_str(), mode);

        return file;
    }

    int lock() {
        if (file) {
# ifdef _WIN32
            fd = _fileno(file);
            hFile = (HANDLE) _get_osfhandle(fd);
            if (hFile == INVALID_HANDLE_VALUE) {
                fd = -1;

                return 1;
            }

            OVERLAPPED overlapped = { 0 };
            if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD,
                            &overlapped)) {
                fd = -1;

                return 1;
            }
# else
            fd = fileno(file);
            if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
                fd = -1;

                return 1;
            }
# endif
        }

        return 0;
    }

    ~File() {
        if (fd >= 0) {
# ifdef _WIN32
            if (hFile != INVALID_HANDLE_VALUE) {
                OVERLAPPED overlapped = { 0 };
                UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped);
            }
# else
            flock(fd, LOCK_UN);
# endif
        }

        if (file) {
            fclose(file);
        }
    }

  private:
    int fd = -1;
# ifdef _WIN32
    HANDLE hFile = INVALID_HANDLE_VALUE;  // initialized so the destructor is safe if lock() was never called
# endif
};
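
// Thin libcurl wrapper used for model downloads. It supports resuming partial
// downloads (CURLOPT_RESUME_FROM_LARGE), optional custom headers, writing
// either to a file or into a std::string, and a single-line progress bar.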
class HttpClient {
  public:
    int init(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
             const bool progress, std::string * response_str = nullptr) {
        std::string output_file_partial;
        curl = curl_easy_init();
        if (!curl) {
            return 1;
        }

        progress_data data;
        File out;
        if (!output_file.empty()) {
            output_file_partial = output_file + ".partial";
            if (!out.open(output_file_partial, "ab")) {
                printe("Failed to open file\n");

                return 1;
            }

            if (out.lock()) {
                printe("Failed to exclusively lock file\n");

                return 1;
            }
        }

        set_write_options(response_str, out);
        data.file_size = set_resume_point(output_file_partial);
        set_progress_options(progress, data);
        set_headers(headers);
        if (perform(url) != CURLE_OK) {
            return 1;
        }

        // only promote the .partial file to its final name once the transfer succeeded
        if (!output_file.empty()) {
            std::filesystem::rename(output_file_partial, output_file);
        }

        return 0;
    }

    ~HttpClient() {
        if (chunk) {
            curl_slist_free_all(chunk);
        }

        if (curl) {
            curl_easy_cleanup(curl);
        }
    }

  private:
    CURL * curl = nullptr;
    struct curl_slist * chunk = nullptr;

    void set_write_options(std::string * response_str, const File & out) {
        if (response_str) {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, capture_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str);
        } else {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file);
        }
    }

    size_t set_resume_point(const std::string & output_file) {
        size_t file_size = 0;
        if (std::filesystem::exists(output_file)) {
            file_size = std::filesystem::file_size(output_file);
            curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast<curl_off_t>(file_size));
        }

        return file_size;
    }

    void set_progress_options(bool progress, progress_data & data) {
        if (progress) {
            curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
            curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data);
            curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress);
        }
    }

    void set_headers(const std::vector<std::string> & headers) {
        if (!headers.empty()) {
            if (chunk) {
                curl_slist_free_all(chunk);
                chunk = nullptr;
            }

            for (const auto & header : headers) {
                chunk = curl_slist_append(chunk, header.c_str());
            }

            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
        }
    }

    CURLcode perform(const std::string & url) {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
        const CURLcode res = curl_easy_perform(curl);
        if (res != CURLE_OK) {
            printe("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
        }

        return res;
    }

    static std::string human_readable_time(double seconds) {
        int hrs = static_cast<int>(seconds) / 3600;
        int mins = (static_cast<int>(seconds) % 3600) / 60;
        int secs = static_cast<int>(seconds) % 60;
        if (hrs > 0) {
            return fmt("%dh %02dm %02ds", hrs, mins, secs);
        } else if (mins > 0) {
            return fmt("%dm %02ds", mins, secs);
        } else {
            return fmt("%ds", secs);
        }
    }

    static std::string human_readable_size(curl_off_t size) {
        static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
        const int length = sizeof(suffix) / sizeof(suffix[0]);
        int i = 0;
        double dbl_size = size;
        if (size > 1024) {
            for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                dbl_size = size / 1024.0;
            }
        }

        return fmt("%.2f %s", dbl_size, suffix[i]);
    }
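
    // curl progress callback. A resumed transfer only reports the bytes of the
    // current session, so data->file_size (what was already on disk) is added to
    // both the numerator and the denominator before computing the percentage;
    // the ETA is simply remaining bytes / average speed since the transfer began.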
    static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t,
                               curl_off_t) {
        progress_data * data = static_cast<progress_data *>(ptr);
        if (total_to_download <= 0) {
            return 0;
        }

        total_to_download += data->file_size;
        const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
        std::string progress_prefix = generate_progress_prefix(percentage);

        const double speed = calculate_speed(now_downloaded, data->start_time);
        const double tim = (total_to_download - now_downloaded) / speed;
        std::string progress_suffix =
            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim);

        int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
        std::string progress_bar;
        generate_progress_bar(progress_bar_width, percentage, progress_bar);

        print_progress(progress_prefix, progress_bar, progress_suffix);
        data->printed = true;

        return 0;
    }

    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
        return (now_downloaded_plus_file_size * 100) / total_to_download;
    }

    static std::string generate_progress_prefix(curl_off_t percentage) {
        // cast so the format specifier matches on platforms where curl_off_t is not long
        return fmt("%3ld%% |", static_cast<long>(percentage));
    }

    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
        const auto now = std::chrono::steady_clock::now();
        const std::chrono::duration<double> elapsed_seconds = now - start_time;
        return now_downloaded / elapsed_seconds.count();
    }

    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
                                                double speed, double estimated_time) {
        const int width = 10;
        return fmt("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(), width,
                   human_readable_size(total_to_download).c_str(), width, human_readable_size(speed).c_str(), width,
                   human_readable_time(estimated_time).c_str());
    }

    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3;
        if (progress_bar_width < 1) {
            progress_bar_width = 1;
        }

        return progress_bar_width;
    }

    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
                                             std::string & progress_bar) {
        const curl_off_t pos = (percentage * progress_bar_width) / 100;
        for (int i = 0; i < progress_bar_width; ++i) {
            progress_bar.append((i < pos) ? "█" : " ");
        }

        return progress_bar;
    }

    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                               const std::string & progress_suffix) {
        printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(),
               progress_suffix.c_str());
    }

    // Write callback: append the received bytes to the output file
    static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        FILE * out = static_cast<FILE *>(stream);
        return fwrite(ptr, size, nmemb, out);
    }

    // Write callback: capture the received bytes into a string
    static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        std::string * str = static_cast<std::string *>(stream);
        str->append(static_cast<char *>(ptr), size * nmemb);
        return size * nmemb;
    }
};
#endif
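
// Owns every llama.cpp object needed for a chat session: model, context and
// sampler, plus the running chat transcript (`messages` points into
// `msg_strs`, which keeps the message bodies alive).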
class LlamaData {
  public:
    llama_model_ptr model;
    llama_sampler_ptr sampler;
    llama_context_ptr context;
    std::vector<llama_chat_message> messages;
    std::vector<std::string> msg_strs;
    std::vector<char> fmtted;

    int init(Opt & opt) {
        model = initialize_model(opt);
        if (!model) {
            return 1;
        }

        context = initialize_context(model, opt);
        if (!context) {
            return 1;
        }

        sampler = initialize_sampler(opt);

        return 0;
    }

  private:
#ifdef LLAMA_USE_CURL
    int download(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
                 const bool progress, std::string * response_str = nullptr) {
        HttpClient http;
        if (http.init(url, headers, output_file, progress, response_str)) {
            return 1;
        }

        return 0;
    }
#else
    int download(const std::string &, const std::vector<std::string> &, const std::string &, const bool,
                 std::string * = nullptr) {
        printe("%s: llama.cpp built without libcurl, downloading from a URL is not supported.\n", __func__);

        return 1;
    }
#endif
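
    // Download a model from Hugging Face. The input has the form
    // "<user>/<repo>/<file>", so splitting at the second '/' yields the repo
    // and the file path that are pasted into the /resolve/main/ URL.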
    int huggingface_dl(const std::string & model, const std::vector<std::string> headers, const std::string & bn) {
        // Find the second occurrence of '/' after protocol string
        size_t pos = model.find('/');
        pos = model.find('/', pos + 1);
        if (pos == std::string::npos) {
            return 1;
        }

        const std::string hfr = model.substr(0, pos);
        const std::string hff = model.substr(pos + 1);
        const std::string url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff;
        return download(url, headers, bn, true);
    }
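
    // Download a model from the Ollama registry: bare names default to the
    // "library/" namespace and the "latest" tag; the manifest is fetched first,
    // and the layer with mediaType application/vnd.ollama.image.model is the
    // GGUF blob that actually gets downloaded.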
    int ollama_dl(std::string & model, const std::vector<std::string> headers, const std::string & bn) {
        if (model.find('/') == std::string::npos) {
            model = "library/" + model;
        }

        std::string model_tag = "latest";
        size_t colon_pos = model.find(':');
        if (colon_pos != std::string::npos) {
            model_tag = model.substr(colon_pos + 1);
            model = model.substr(0, colon_pos);
        }

        std::string manifest_url = "https://registry.ollama.ai/v2/" + model + "/manifests/" + model_tag;
        std::string manifest_str;
        const int ret = download(manifest_url, headers, "", false, &manifest_str);
        if (ret) {
            return ret;
        }

        nlohmann::json manifest = nlohmann::json::parse(manifest_str);
        std::string layer;
        for (const auto & l : manifest["layers"]) {
            if (l["mediaType"] == "application/vnd.ollama.image.model") {
                layer = l["digest"];
                break;
            }
        }

        std::string blob_url = "https://registry.ollama.ai/v2/" + model + "/blobs/" + layer;
        return download(blob_url, headers, bn, true);
    }
    std::string basename(const std::string & path) {
        const size_t pos = path.find_last_of("/\\");
        if (pos == std::string::npos) {
            return path;
        }

        return path.substr(pos + 1);
    }

    int remove_proto(std::string & model_) {
        const std::string::size_type pos = model_.find("://");
        if (pos == std::string::npos) {
            return 1;
        }

        model_ = model_.substr(pos + 3);  // Skip past "://"
        return 0;
    }
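
    // Map the user-supplied model string to a local file, downloading it if
    // needed: file:// and existing paths are used as-is; hf://, huggingface://,
    // ollama:// and https:// are fetched; anything else falls back to the
    // Ollama registry. On return, model_ holds the local basename of the model.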
    int resolve_model(std::string & model_) {
        int ret = 0;
        if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) {
            remove_proto(model_);

            return ret;
        }

        const std::string bn = basename(model_);
        const std::vector<std::string> headers = { "--header",
                                                   "Accept: application/vnd.docker.distribution.manifest.v2+json" };
        if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://")) {
            remove_proto(model_);
            ret = huggingface_dl(model_, headers, bn);
        } else if (string_starts_with(model_, "ollama://")) {
            remove_proto(model_);
            ret = ollama_dl(model_, headers, bn);
        } else if (string_starts_with(model_, "https://")) {
            ret = download(model_, headers, bn, true);
        } else {
            ret = ollama_dl(model_, headers, bn);
        }

        model_ = bn;

        return ret;
    }
    // Initializes the model and returns a unique pointer to it
    llama_model_ptr initialize_model(Opt & opt) {
        ggml_backend_load_all();
        resolve_model(opt.model_);
        printe(
            "\r%*s"
            "\rLoading model",
            get_terminal_width(), " ");
        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), opt.model_params));
        if (!model) {
            printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
        }

        printe("\r%*s\r", static_cast<int>(sizeof("Loading model")), " ");

        return model;
    }

    // Initializes the context with the specified parameters
    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
        llama_context_ptr context(llama_new_context_with_model(model.get(), opt.ctx_params));
        if (!context) {
            printe("%s: error: failed to create the llama_context\n", __func__);
        }

        return context;
    }

    // Initializes and configures the sampler
    llama_sampler_ptr initialize_sampler(const Opt & opt) {
        llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

        return sampler;
    }
};
// Add a message to `messages` and store its content in `msg_strs`
// (taken by value so that std::move actually moves rather than copying)
static void add_message(const char * role, std::string text, LlamaData & llama_data) {
    llama_data.msg_strs.push_back(std::move(text));
    llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() });
}

// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(LlamaData & llama_data, const bool append) {
    int result = llama_chat_apply_template(
        llama_data.model.get(), nullptr, llama_data.messages.data(), llama_data.messages.size(), append,
        append ? llama_data.fmtted.data() : nullptr, append ? llama_data.fmtted.size() : 0);
    if (append && result > static_cast<int>(llama_data.fmtted.size())) {
        llama_data.fmtted.resize(result);
        result = llama_chat_apply_template(llama_data.model.get(), nullptr, llama_data.messages.data(),
                                           llama_data.messages.size(), append, llama_data.fmtted.data(),
                                           llama_data.fmtted.size());
    }

    return result;
}
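
// llama_tokenize returns the negated token count when called with a null
// buffer, so the first call below sizes the vector and the second call fills it.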
// Function to tokenize the prompt
static int tokenize_prompt(const llama_model_ptr & model, const std::string & prompt,
                           std::vector<llama_token> & prompt_tokens) {
    const int n_prompt_tokens = -llama_tokenize(model.get(), prompt.c_str(), prompt.size(), NULL, 0, true, true);
    prompt_tokens.resize(n_prompt_tokens);
    if (llama_tokenize(model.get(), prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true,
                       true) < 0) {
        printe("failed to tokenize the prompt\n");
        return -1;
    }

    return n_prompt_tokens;
}

// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
    const int n_ctx = llama_n_ctx(ctx.get());
    const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get());
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        printf("\033[0m\n");
        printe("context size exceeded\n");
        return 1;
    }

    return 0;
}
// convert the token to a string
static int convert_token_to_string(const llama_model_ptr & model, const llama_token token_id, std::string & piece) {
    char buf[256];
    int n = llama_token_to_piece(model.get(), token_id, buf, sizeof(buf), 0, true);
    if (n < 0) {
        printe("failed to convert token to piece\n");
        return 1;
    }

    piece = std::string(buf, n);
    return 0;
}

static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) {
    printf("%s", piece.c_str());
    fflush(stdout);
    response += piece;
}
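
// Token-by-token generation loop: decode the current batch, sample the next
// token, stop on an end-of-generation token, otherwise print it and feed it
// back in as a single-token batch.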
// helper function to evaluate a prompt and generate a response
static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
    std::vector<llama_token> tokens;
    if (tokenize_prompt(llama_data.model, prompt, tokens) < 0) {
        return 1;
    }

    // prepare a batch for the prompt
    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
    llama_token new_token_id;
    while (true) {
        if (check_context_size(llama_data.context, batch)) {
            return 1;
        }

        if (llama_decode(llama_data.context.get(), batch)) {
            printe("failed to decode\n");
            return 1;
        }

        // sample the next token and check whether it is an end-of-generation token
        new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1);
        if (llama_token_is_eog(llama_data.model.get(), new_token_id)) {
            break;
        }

        std::string piece;
        if (convert_token_to_string(llama_data.model, new_token_id, piece)) {
            return 1;
        }

        print_word_and_concatenate_to_response(piece, response);

        // prepare the next batch with the sampled token
        batch = llama_batch_get_one(&new_token_id, 1);
    }

    return 0;
}
static int read_user_input(std::string & user) {
    std::getline(std::cin, user);
    return user.empty();  // Should have data in happy path
}

// Function to generate a response based on the prompt
static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response,
                             const bool stdout_a_terminal) {
    // Set response color
    if (stdout_a_terminal) {
        printf("\033[33m");
    }

    if (generate(llama_data, prompt, response)) {
        printe("failed to generate response\n");
        return 1;
    }

    // End response with color reset and newline
    printf("\n%s", stdout_a_terminal ? "\033[0m" : "");
    return 0;
}

// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(LlamaData & llama_data, const bool append, int & output_length) {
    const int new_len = apply_chat_template(llama_data, append);
    if (new_len < 0) {
        printe("failed to apply the chat template\n");
        return -1;
    }

    output_length = new_len;
    return 0;
}

// Helper function to handle user input
static int handle_user_input(std::string & user_input, const std::string & user) {
    if (!user.empty()) {
        user_input = user;
        return 0;  // No need for interactive input
    }

    printf(
        "\r%*s"
        "\r\033[32m> \033[0m",
        get_terminal_width(), " ");
    return read_user_input(user_input);  // Returns true if input ends the loop
}

static bool is_stdin_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdin, &mode);
#else
    return isatty(STDIN_FILENO);
#endif
}

static bool is_stdout_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdout, &mode);
#else
    return isatty(STDOUT_FILENO);
#endif
}
// Main chat loop: read user input, apply the chat template, generate a
// response, and repeat until the user exits (or run once if a prompt was
// supplied on the command line or via a pipe)
static int chat_loop(LlamaData & llama_data, const std::string & user) {
    int prev_len = 0;
    llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
    static const bool stdout_a_terminal = is_stdout_a_terminal();
    while (true) {
        // Get user input
        std::string user_input;
        while (handle_user_input(user_input, user)) {
        }

        add_message("user", user.empty() ? user_input : user, llama_data);
        int new_len;
        if (apply_chat_template_with_error_handling(llama_data, true, new_len) < 0) {
            return 1;
        }

        // only the part of the formatted transcript appended since the last turn is decoded
        std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len);
        std::string response;
        if (generate_response(llama_data, prompt, response, stdout_a_terminal)) {
            return 1;
        }

        if (!user.empty()) {
            break;
        }

        add_message("assistant", response, llama_data);
        if (apply_chat_template_with_error_handling(llama_data, false, prev_len) < 0) {
            return 1;
        }
    }

    return 0;
}
static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
    const Opt * opt = static_cast<Opt *>(p);
    if (opt->verbose || level == GGML_LOG_LEVEL_ERROR) {
        printe("%s", text);
    }
}

static std::string read_pipe_data() {
    std::ostringstream result;
    result << std::cin.rdbuf();  // Read all data from std::cin
    return result.str();
}
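
// Entry point: parse options, append piped stdin (if any) to the prompt,
// install the verbosity-filtering log callback, load the model and start the
// chat loop.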
int main(int argc, const char ** argv) {
    Opt opt;
    const int ret = opt.init(argc, argv);
    if (ret == 2) {
        return 0;
    } else if (ret) {
        return 1;
    }

    if (!is_stdin_a_terminal()) {
        if (!opt.user.empty()) {
            opt.user += "\n\n";
        }

        opt.user += read_pipe_data();
    }

    llama_log_set(log_callback, &opt);
    LlamaData llama_data;
    if (llama_data.init(opt)) {
        return 1;
    }

    if (chat_loop(llama_data, opt.user)) {
        return 1;
    }

    return 0;
}