// run.cpp
#if defined(_WIN32)
#    include <windows.h>
#else
#    include <sys/file.h>
#    include <sys/ioctl.h>
#    include <unistd.h>
#endif

#if defined(LLAMA_USE_CURL)
#    include <curl/curl.h>
#endif

#include <chrono>  // std::chrono::steady_clock is used by progress_data below
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <filesystem>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include "common.h"
#include "json.hpp"
#include "llama-cpp.h"
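
// printf-style formatting into a std::string: a first vsnprintf pass measures
// the required size, a second pass fills the buffer.
// Example: fmt("%d %s", 2, "items") returns "2 items".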
GGML_ATTRIBUTE_FORMAT(1, 2)
static std::string fmt(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    const int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX);  // NOLINT
    std::string buf;
    buf.resize(size);
    const int size2 = vsnprintf(const_cast<char *>(buf.data()), buf.size() + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);

    return buf;
}

GGML_ATTRIBUTE_FORMAT(1, 2)
static int printe(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    const int ret = vfprintf(stderr, fmt, args);
    va_end(args);

    return ret;
}
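
// Command-line option parser for llama-run. The model reference and optional
// prompt are positional arguments; everything after "--" is treated as
// positional.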
class Opt {
  public:
    int init(int argc, const char ** argv) {
        // Parse arguments
        if (parse(argc, argv)) {
            printe("Error: Failed to parse arguments.\n");
            help();
            return 1;
        }

        // If help is requested, show help and exit
        if (help_) {
            help();
            return 2;
        }

        return 0;  // Success
    }

    std::string model_;
    std::string user_;
    int         context_size_ = -1, ngl_ = -1;
    bool        verbose_      = false;

  private:
    bool help_ = false;

    bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
        return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atoi(argv[++i]);

        return 0;
    }

    int parse(int argc, const char ** argv) {
        bool options_parsing = true;
        for (int i = 1, positional_args_i = 0; i < argc; ++i) {
            if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) {
                if (handle_option_with_value(argc, argv, i, context_size_) == 1) {
                    return 1;
                }
            } else if (options_parsing && (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0)) {
                if (handle_option_with_value(argc, argv, i, ngl_) == 1) {
                    return 1;
                }
            } else if (options_parsing &&
                       (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
                verbose_ = true;
            } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) {
                help_ = true;
                return 0;
            } else if (options_parsing && strcmp(argv[i], "--") == 0) {
                options_parsing = false;
            } else if (positional_args_i == 0) {
                if (!argv[i][0] || argv[i][0] == '-') {
                    return 1;
                }

                ++positional_args_i;
                model_ = argv[i];
            } else if (positional_args_i == 1) {
                ++positional_args_i;
                user_ = argv[i];
            } else {
                user_ += " " + std::string(argv[i]);
            }
        }

        return 0;
    }

    void help() const {
        printf(
            "Description:\n"
            "  Runs an LLM\n"
            "\n"
            "Usage:\n"
            "  llama-run [options] model [prompt]\n"
            "\n"
            "Options:\n"
            "  -c, --context-size <value>\n"
            "      Context size (default: %d)\n"
            "  -n, --ngl <value>\n"
            "      Number of GPU layers (default: %d)\n"
            "  -v, --verbose, --log-verbose\n"
            "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
            "  -h, --help\n"
            "      Show help message\n"
            "\n"
            "Commands:\n"
            "  model\n"
            "      Model is a string with an optional prefix of \n"
            "      huggingface:// (hf://), ollama://, https:// or file://.\n"
            "      If no protocol is specified and a file exists in the specified\n"
            "      path, file:// is assumed, otherwise if a file does not exist in\n"
            "      the specified path, ollama:// is assumed. Models that are being\n"
            "      pulled are downloaded with .partial extension while being\n"
            "      downloaded and then renamed as the file without the .partial\n"
            "      extension when complete.\n"
            "\n"
            "Examples:\n"
            "  llama-run llama3\n"
            "  llama-run ollama://granite-code\n"
            "  llama-run ollama://smollm:135m\n"
            "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run https://example.com/some-file1.gguf\n"
            "  llama-run some-file2.gguf\n"
            "  llama-run file://some-file3.gguf\n"
            "  llama-run --ngl 999 some-file4.gguf\n"
            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
            llama_context_default_params().n_batch, llama_model_default_params().n_gpu_layers);
    }
};
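
// State shared with the curl progress callback: the size of any previously
// downloaded partial file (so resumed transfers report correct totals) and
// the transfer start time used for speed estimates.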
struct progress_data {
    size_t                                file_size  = 0;
    std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
    bool                                  printed    = false;
};

static int get_terminal_width() {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO csbi;
    if (!GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi)) {
        return 80;  // not attached to a console, fall back to a common default
    }

    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
    struct winsize w;
    if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &w) != 0) {
        return 80;  // not attached to a terminal, fall back to a common default
    }

    return w.ws_col;
#endif
}

#ifdef LLAMA_USE_CURL
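
// RAII wrapper around a FILE * that can take an exclusive advisory lock
// (LockFileEx on Windows, flock elsewhere) so that concurrent writers to the
// same .partial download fail fast (LOCKFILE_FAIL_IMMEDIATELY / LOCK_NB).
// The lock and the handle are released in the destructor.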
class File {
  public:
    FILE * file = nullptr;

    FILE * open(const std::string & filename, const char * mode) {
        file = fopen(filename.c_str(), mode);

        return file;
    }

    int lock() {
        if (file) {
#    ifdef _WIN32
            fd    = _fileno(file);
            hFile = (HANDLE) _get_osfhandle(fd);
            if (hFile == INVALID_HANDLE_VALUE) {
                fd = -1;

                return 1;
            }

            OVERLAPPED overlapped = { 0 };
            if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD,
                            &overlapped)) {
                fd = -1;

                return 1;
            }
#    else
            fd = fileno(file);
            if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
                fd = -1;

                return 1;
            }
#    endif
        }

        return 0;
    }

    ~File() {
        if (fd >= 0) {
#    ifdef _WIN32
            if (hFile != INVALID_HANDLE_VALUE) {
                OVERLAPPED overlapped = { 0 };
                UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped);
            }
#    else
            flock(fd, LOCK_UN);
#    endif
        }

        if (file) {
            fclose(file);
        }
    }

  private:
    int fd = -1;
#    ifdef _WIN32
    HANDLE hFile = INVALID_HANDLE_VALUE;  // initialized so the destructor check is always defined
#    endif
};
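
// Thin libcurl wrapper. Downloads are written to "<output_file>.partial",
// resumed from the current partial size when possible, and renamed to the
// final name on success; a terminal progress bar is drawn when requested.
// When response_str is given, the body is captured in memory instead.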
class HttpClient {
  public:
    int init(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
             const bool progress, std::string * response_str = nullptr) {
        std::string output_file_partial;
        curl = curl_easy_init();
        if (!curl) {
            return 1;
        }

        progress_data data;
        File          out;
        if (!output_file.empty()) {
            output_file_partial = output_file + ".partial";
            if (!out.open(output_file_partial, "ab")) {
                printe("Failed to open file\n");

                return 1;
            }

            if (out.lock()) {
                printe("Failed to exclusively lock file\n");

                return 1;
            }
        }

        set_write_options(response_str, out);
        data.file_size = set_resume_point(output_file_partial);
        set_progress_options(progress, data);
        set_headers(headers);
        if (perform(url) != CURLE_OK) {
            return 1;  // do not rename a failed transfer to the final name
        }

        if (!output_file.empty()) {
            std::filesystem::rename(output_file_partial, output_file);
        }

        return 0;
    }

    ~HttpClient() {
        if (chunk) {
            curl_slist_free_all(chunk);
        }

        if (curl) {
            curl_easy_cleanup(curl);
        }
    }

  private:
    CURL *              curl  = nullptr;
    struct curl_slist * chunk = nullptr;

    void set_write_options(std::string * response_str, const File & out) {
        if (response_str) {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, capture_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str);
        } else {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file);
        }
    }

    size_t set_resume_point(const std::string & output_file) {
        size_t file_size = 0;
        if (std::filesystem::exists(output_file)) {
            file_size = std::filesystem::file_size(output_file);
            curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast<curl_off_t>(file_size));
        }

        return file_size;
    }

    void set_progress_options(bool progress, progress_data & data) {
        if (progress) {
            curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
            curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data);
            curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress);
        }
    }

    void set_headers(const std::vector<std::string> & headers) {
        if (!headers.empty()) {
            if (chunk) {
                curl_slist_free_all(chunk);
                chunk = nullptr;
            }

            for (const auto & header : headers) {
                chunk = curl_slist_append(chunk, header.c_str());
            }

            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
        }
    }

    // returns the curl result so callers can tell a failed transfer from a successful one
    CURLcode perform(const std::string & url) {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
        const CURLcode res = curl_easy_perform(curl);
        if (res != CURLE_OK) {
            printe("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
        }

        return res;
    }

    static std::string human_readable_time(double seconds) {
        int hrs  = static_cast<int>(seconds) / 3600;
        int mins = (static_cast<int>(seconds) % 3600) / 60;
        int secs = static_cast<int>(seconds) % 60;

        if (hrs > 0) {
            return fmt("%dh %02dm %02ds", hrs, mins, secs);
        } else if (mins > 0) {
            return fmt("%dm %02ds", mins, secs);
        } else {
            return fmt("%ds", secs);
        }
    }

    static std::string human_readable_size(curl_off_t size) {
        static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
        const int           length   = sizeof(suffix) / sizeof(suffix[0]);
        int                 i        = 0;
        double              dbl_size = size;
        if (size > 1024) {
            for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                dbl_size = size / 1024.0;
            }
        }

        return fmt("%.2f %s", dbl_size, suffix[i]);
    }

    static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t,
                               curl_off_t) {
        progress_data * data = static_cast<progress_data *>(ptr);
        if (total_to_download <= 0) {
            return 0;
        }

        total_to_download += data->file_size;
        const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
        std::string      progress_prefix = generate_progress_prefix(percentage);

        const double speed = calculate_speed(now_downloaded, data->start_time);
        const double tim   = (total_to_download - now_downloaded) / speed;
        std::string  progress_suffix =
            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim);

        int         progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
        std::string progress_bar;
        generate_progress_bar(progress_bar_width, percentage, progress_bar);

        print_progress(progress_prefix, progress_bar, progress_suffix);
        data->printed = true;

        return 0;
    }

    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
        return (now_downloaded_plus_file_size * 100) / total_to_download;
    }

    static std::string generate_progress_prefix(curl_off_t percentage) {
        // cast so the value matches the %ld conversion on platforms where curl_off_t is not long
        return fmt("%3ld%% |", static_cast<long int>(percentage));
    }

    static double calculate_speed(curl_off_t now_downloaded,
                                  const std::chrono::steady_clock::time_point & start_time) {
        const auto                          now             = std::chrono::steady_clock::now();
        const std::chrono::duration<double> elapsed_seconds = now - start_time;

        return now_downloaded / elapsed_seconds.count();
    }

    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size,
                                                curl_off_t total_to_download, double speed, double estimated_time) {
        const int width = 10;

        return fmt("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(), width,
                   human_readable_size(total_to_download).c_str(), width, human_readable_size(speed).c_str(), width,
                   human_readable_time(estimated_time).c_str());
    }

    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3;
        if (progress_bar_width < 1) {
            progress_bar_width = 1;
        }

        return progress_bar_width;
    }

    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
                                             std::string & progress_bar) {
        const curl_off_t pos = (percentage * progress_bar_width) / 100;
        for (int i = 0; i < progress_bar_width; ++i) {
            progress_bar.append((i < pos) ? "█" : " ");
        }

        return progress_bar;
    }

    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                               const std::string & progress_suffix) {
        printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(),
               progress_suffix.c_str());
    }

    // curl write callback: append the received bytes to the output file
    static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        FILE * out = static_cast<FILE *>(stream);

        return fwrite(ptr, size, nmemb, out);
    }

    // curl write callback: capture the received bytes into a string
    static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        std::string * str = static_cast<std::string *>(stream);
        str->append(static_cast<char *>(ptr), size * nmemb);

        return size * nmemb;
    }
};
#endif
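
// Owns the llama.cpp objects (model, context, sampler) plus the chat history.
// msg_strs keeps the message bodies alive because llama_chat_message only
// stores raw const char * pointers.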
class LlamaData {
  public:
    llama_model_ptr                 model;
    llama_sampler_ptr               sampler;
    llama_context_ptr               context;
    std::vector<llama_chat_message> messages;
    std::vector<std::string>        msg_strs;
    std::vector<char>               fmtted;

    int init(Opt & opt) {
        model = initialize_model(opt);
        if (!model) {
            return 1;
        }

        context = initialize_context(model, opt.context_size_);
        if (!context) {
            return 1;
        }

        sampler = initialize_sampler();

        return 0;
    }

  private:
#ifdef LLAMA_USE_CURL
    int download(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
                 const bool progress, std::string * response_str = nullptr) {
        HttpClient http;
        if (http.init(url, headers, output_file, progress, response_str)) {
            return 1;
        }

        return 0;
    }
#else
    int download(const std::string &, const std::vector<std::string> &, const std::string &, const bool,
                 std::string * = nullptr) {
        printe("%s: llama.cpp built without libcurl, downloading from a URL is not supported\n", __func__);

        return 1;
    }
#endif
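
    // A Hugging Face reference looks like "<owner>/<repo>/<file>.gguf"; the
    // download URL resolves the file against the repository's main branch.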
    int huggingface_dl(const std::string & model, const std::vector<std::string> & headers, const std::string & bn) {
        // Find the second occurrence of '/' after protocol string
        size_t pos = model.find('/');
        pos        = model.find('/', pos + 1);
        if (pos == std::string::npos) {
            return 1;
        }

        const std::string hfr = model.substr(0, pos);
        const std::string hff = model.substr(pos + 1);
        const std::string url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff;

        return download(url, headers, bn, true);
    }

    int ollama_dl(std::string & model, const std::vector<std::string> & headers, const std::string & bn) {
        if (model.find('/') == std::string::npos) {
            model = "library/" + model;
        }

        std::string model_tag = "latest";
        size_t      colon_pos = model.find(':');
        if (colon_pos != std::string::npos) {
            model_tag = model.substr(colon_pos + 1);
            model     = model.substr(0, colon_pos);
        }

        std::string manifest_url = "https://registry.ollama.ai/v2/" + model + "/manifests/" + model_tag;
        std::string manifest_str;
        const int   ret = download(manifest_url, headers, "", false, &manifest_str);
        if (ret) {
            return ret;
        }

        nlohmann::json manifest = nlohmann::json::parse(manifest_str);
        std::string    layer;
        for (const auto & l : manifest["layers"]) {
            if (l["mediaType"] == "application/vnd.ollama.image.model") {
                layer = l["digest"];
                break;
            }
        }

        if (layer.empty()) {
            printe("%s: no model layer found in manifest for %s:%s\n", __func__, model.c_str(), model_tag.c_str());

            return 1;
        }

        std::string blob_url = "https://registry.ollama.ai/v2/" + model + "/blobs/" + layer;

        return download(blob_url, headers, bn, true);
    }

    std::string basename(const std::string & path) {
        const size_t pos = path.find_last_of("/\\");
        if (pos == std::string::npos) {
            return path;
        }

        return path.substr(pos + 1);
    }

    int remove_proto(std::string & model_) {
        const std::string::size_type pos = model_.find("://");
        if (pos == std::string::npos) {
            return 1;
        }

        model_ = model_.substr(pos + 3);  // Skip past "://"

        return 0;
    }
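
    // Resolution order: an existing local path or file:// is used directly;
    // hf://, huggingface:// and ollama:// go to their registries; https:// is
    // fetched as-is; anything else is assumed to be an Ollama model name.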
    int resolve_model(std::string & model_) {
        int ret = 0;
        if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) {
            remove_proto(model_);

            return ret;
        }

        // raw header line for curl_slist_append (not a command-line flag)
        const std::string              bn      = basename(model_);
        const std::vector<std::string> headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" };
        if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://")) {
            remove_proto(model_);
            ret = huggingface_dl(model_, headers, bn);
        } else if (string_starts_with(model_, "ollama://")) {
            remove_proto(model_);
            ret = ollama_dl(model_, headers, bn);
        } else if (string_starts_with(model_, "https://")) {
            ret = download(model_, headers, bn, true);
        } else {
            ret = ollama_dl(model_, headers, bn);
        }

        model_ = bn;

        return ret;
    }

    // Initializes the model and returns a unique pointer to it
    llama_model_ptr initialize_model(Opt & opt) {
        ggml_backend_load_all();
        llama_model_params model_params = llama_model_default_params();
        model_params.n_gpu_layers       = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers;
        resolve_model(opt.model_);
        printe(
            "\r%*s"
            "\rLoading model",
            get_terminal_width(), " ");
        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params));
        if (!model) {
            printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
        }

        printe("\r%*s\r", static_cast<int>(sizeof("Loading model")), " ");

        return model;
    }

    // Initializes the context with the specified parameters
    llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) {
        llama_context_params ctx_params = llama_context_default_params();
        ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? n_ctx : ctx_params.n_batch;
        llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params));
        if (!context) {
            printe("%s: error: failed to create the llama_context\n", __func__);
        }

        return context;
    }

    // Initializes and configures the sampler
    llama_sampler_ptr initialize_sampler() {
        llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(0.8f));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

        return sampler;
    }
};

// Add a message to `messages` and store its content in `msg_strs`
// (llama_chat_message only keeps a raw pointer into the stored string)
static void add_message(const char * role, const std::string & text, LlamaData & llama_data) {
    llama_data.msg_strs.push_back(text);
    llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() });
}

// Function to apply the chat template and resize `formatted` if needed
static int apply_chat_template(LlamaData & llama_data, const bool append) {
    int result = llama_chat_apply_template(
        llama_data.model.get(), nullptr, llama_data.messages.data(), llama_data.messages.size(), append,
        append ? llama_data.fmtted.data() : nullptr, append ? llama_data.fmtted.size() : 0);
    if (append && result > static_cast<int>(llama_data.fmtted.size())) {
        llama_data.fmtted.resize(result);
        result = llama_chat_apply_template(llama_data.model.get(), nullptr, llama_data.messages.data(),
                                           llama_data.messages.size(), append, llama_data.fmtted.data(),
                                           llama_data.fmtted.size());
    }

    return result;
}

// Function to tokenize the prompt
static int tokenize_prompt(const llama_model_ptr & model, const std::string & prompt,
                           std::vector<llama_token> & prompt_tokens) {
    const int n_prompt_tokens = -llama_tokenize(model.get(), prompt.c_str(), prompt.size(), NULL, 0, true, true);
    prompt_tokens.resize(n_prompt_tokens);
    if (llama_tokenize(model.get(), prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true,
                       true) < 0) {
        printe("failed to tokenize the prompt\n");

        return -1;
    }

    return n_prompt_tokens;
}

// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
    const int n_ctx      = llama_n_ctx(ctx.get());
    const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get());
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        printf("\033[0m\n");
        printe("context size exceeded\n");

        return 1;
    }

    return 0;
}

// convert the token to a string
static int convert_token_to_string(const llama_model_ptr & model, const llama_token token_id, std::string & piece) {
    char buf[256];
    int  n = llama_token_to_piece(model.get(), token_id, buf, sizeof(buf), 0, true);
    if (n < 0) {
        printe("failed to convert token to piece\n");

        return 1;
    }

    piece = std::string(buf, n);

    return 0;
}

static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) {
    printf("%s", piece.c_str());
    fflush(stdout);
    response += piece;
}

// helper function to evaluate a prompt and generate a response
static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
    std::vector<llama_token> tokens;
    if (tokenize_prompt(llama_data.model, prompt, tokens) < 0) {
        return 1;
    }

    // prepare a batch for the prompt
    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
    llama_token new_token_id;
    while (true) {
        if (check_context_size(llama_data.context, batch)) {
            return 1;  // stop generating once the context is full
        }

        if (llama_decode(llama_data.context.get(), batch)) {
            printe("failed to decode\n");

            return 1;
        }

        // sample the next token and check whether it is an end-of-generation token
        new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1);
        if (llama_token_is_eog(llama_data.model.get(), new_token_id)) {
            break;
        }

        std::string piece;
        if (convert_token_to_string(llama_data.model, new_token_id, piece)) {
            return 1;
        }

        print_word_and_concatenate_to_response(piece, response);

        // prepare the next batch with the sampled token
        batch = llama_batch_get_one(&new_token_id, 1);
    }

    return 0;
}

static int read_user_input(std::string & user) {
    std::getline(std::cin, user);

    return user.empty();  // Should have data in happy path
}

// Function to generate a response based on the prompt
static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response,
                             const bool stdout_a_terminal) {
    // Set response color
    if (stdout_a_terminal) {
        printf("\033[33m");
    }

    if (generate(llama_data, prompt, response)) {
        printe("failed to generate response\n");

        return 1;
    }

    // End response with color reset and newline
    printf("\n%s", stdout_a_terminal ? "\033[0m" : "");

    return 0;
}

// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(LlamaData & llama_data, const bool append, int & output_length) {
    const int new_len = apply_chat_template(llama_data, append);
    if (new_len < 0) {
        printe("failed to apply the chat template\n");

        return -1;
    }

    output_length = new_len;

    return 0;
}

// Helper function to handle user input; returns non-zero (empty input) so the
// caller keeps prompting until something is entered
static int handle_user_input(std::string & user_input, const std::string & user_) {
    if (!user_.empty()) {
        user_input = user_;

        return 0;  // No need for interactive input
    }

    printf(
        "\r%*s"
        "\r\033[32m> \033[0m",
        get_terminal_width(), " ");

    return read_user_input(user_input);
}

static bool is_stdin_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    DWORD  mode;
    return GetConsoleMode(hStdin, &mode);
#else
    return isatty(STDIN_FILENO);
#endif
}

static bool is_stdout_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD  mode;
    return GetConsoleMode(hStdout, &mode);
#else
    return isatty(STDOUT_FILENO);
#endif
}

// Main chat loop: read user input, apply the chat template and generate the
// response; with a one-shot prompt the loop runs a single turn
static int chat_loop(LlamaData & llama_data, const std::string & user_) {
    int prev_len = 0;
    llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
    static const bool stdout_a_terminal = is_stdout_a_terminal();
    while (true) {
        // Get user input
        std::string user_input;
        while (handle_user_input(user_input, user_)) {
        }

        add_message("user", user_.empty() ? user_input : user_, llama_data);
        int new_len;
        if (apply_chat_template_with_error_handling(llama_data, true, new_len) < 0) {
            return 1;
        }

        std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len);
        std::string response;
        if (generate_response(llama_data, prompt, response, stdout_a_terminal)) {
            return 1;
        }

        if (!user_.empty()) {
            break;
        }

        add_message("assistant", response, llama_data);
        if (apply_chat_template_with_error_handling(llama_data, false, prev_len) < 0) {
            return 1;
        }
    }

    return 0;
}
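
// Forward llama.cpp/ggml log messages to stderr; unless --verbose was given,
// only errors are shown.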
static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
    const Opt * opt = static_cast<Opt *>(p);
    if (opt->verbose_ || level == GGML_LOG_LEVEL_ERROR) {
        printe("%s", text);
    }
}

static std::string read_pipe_data() {
    std::ostringstream result;
    result << std::cin.rdbuf();  // Read all data from std::cin

    return result.str();
}
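
// Entry point: parse options, append any piped stdin to the prompt, load the
// model and start the chat loop.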
int main(int argc, const char ** argv) {
    Opt       opt;
    const int ret = opt.init(argc, argv);
    if (ret == 2) {
        return 0;
    } else if (ret) {
        return 1;
    }

    if (!is_stdin_a_terminal()) {
        if (!opt.user_.empty()) {
            opt.user_ += "\n\n";
        }

        opt.user_ += read_pipe_data();
    }

    llama_log_set(log_callback, &opt);
    LlamaData llama_data;
    if (llama_data.init(opt)) {
        return 1;
    }

    if (chat_loop(llama_data, opt.user_)) {
        return 1;
    }

    return 0;
}