#if defined(_WIN32)
# include <windows.h>
# include <io.h>
#else
# include <sys/file.h>
# include <sys/ioctl.h>
# include <unistd.h>
#endif

#if defined(LLAMA_USE_CURL)
# include <curl/curl.h>
#endif

#include <signal.h>

#include <chrono>
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <filesystem>
#include <iomanip>
#include <iostream>
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "chat-template.hpp"
#include "common.h"
#include "json.hpp"
#include "linenoise.cpp/linenoise.h"
#include "llama-cpp.h"
#include "log.h"

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32)
[[noreturn]] static void sigint_handler(int) {
    printf("\n" LOG_COL_DEFAULT);
    exit(0);  // not ideal, but it's the only way to guarantee exit in all cases
}
#endif

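// printf-style formatting into a std::string. The format string is rendered
// twice: the first vsnprintf(NULL, 0, ...) call only computes the required
// length, then the second call writes into the resized buffer.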
GGML_ATTRIBUTE_FORMAT(1, 2)
static std::string fmt(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    const int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX);  // NOLINT
    std::string buf;
    buf.resize(size);
    const int size2 = vsnprintf(const_cast<char *>(buf.data()), buf.size() + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);

    return buf;
}

GGML_ATTRIBUTE_FORMAT(1, 2)
static int printe(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    const int ret = vfprintf(stderr, fmt, args);
    va_end(args);

    return ret;
}

static std::string strftime_fmt(const char * fmt, const std::tm & tm) {
    std::ostringstream oss;
    oss << std::put_time(&tm, fmt);

    return oss.str();
}

class Opt {
  public:
    int init(int argc, const char ** argv) {
        ctx_params = llama_context_default_params();
        model_params = llama_model_default_params();
        context_size_default = ctx_params.n_batch;
        ngl_default = model_params.n_gpu_layers;
        common_params_sampling sampling;
        temperature_default = sampling.temp;

        if (argc < 2) {
            printe("Error: No arguments provided.\n");
            print_help();
            return 1;
        }

        // Parse arguments
        if (parse(argc, argv)) {
            printe("Error: Failed to parse arguments.\n");
            print_help();
            return 1;
        }

        // If help is requested, show help and exit
        if (help) {
            print_help();
            return 2;
        }

        ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default;
        ctx_params.n_ctx = ctx_params.n_batch;
        model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
        temperature = temperature >= 0 ? temperature : temperature_default;

        return 0;  // Success
    }

    llama_context_params ctx_params;
    llama_model_params model_params;
    std::string model_;
    std::string user;
    bool use_jinja = false;
    int context_size = -1, ngl = -1;
    float temperature = -1;
    bool verbose = false;

  private:
    int context_size_default = -1, ngl_default = -1;
    float temperature_default = -1;
    bool help = false;

    bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
        return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atoi(argv[++i]);

        return 0;
    }

    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
        if (i + 1 >= argc) {
            return 1;
        }

        option_value = std::atof(argv[++i]);

        return 0;
    }

    int parse(int argc, const char ** argv) {
        bool options_parsing = true;
        for (int i = 1, positional_args_i = 0; i < argc; ++i) {
            if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) {
                if (handle_option_with_value(argc, argv, i, context_size) == 1) {
                    return 1;
                }
            } else if (options_parsing &&
                       (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "-ngl") == 0 || strcmp(argv[i], "--ngl") == 0)) {
                if (handle_option_with_value(argc, argv, i, ngl) == 1) {
                    return 1;
                }
            } else if (options_parsing && strcmp(argv[i], "--temp") == 0) {
                if (handle_option_with_value(argc, argv, i, temperature) == 1) {
                    return 1;
                }
            } else if (options_parsing &&
                       (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
                verbose = true;
            } else if (options_parsing && strcmp(argv[i], "--jinja") == 0) {
                use_jinja = true;
            } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) {
                help = true;
                return 0;
            } else if (options_parsing && strcmp(argv[i], "--") == 0) {
                options_parsing = false;
            } else if (positional_args_i == 0) {
                if (!argv[i][0] || argv[i][0] == '-') {
                    return 1;
                }

                ++positional_args_i;
                model_ = argv[i];
            } else if (positional_args_i == 1) {
                ++positional_args_i;
                user = argv[i];
            } else {
                user += " " + std::string(argv[i]);
            }
        }

        if (model_.empty()) {
            return 1;
        }

        return 0;
    }

    void print_help() const {
        printf(
            "Description:\n"
            "  Runs an LLM\n"
            "\n"
            "Usage:\n"
            "  llama-run [options] model [prompt]\n"
            "\n"
            "Options:\n"
            "  -c, --context-size <value>\n"
            "      Context size (default: %d)\n"
            "  --jinja\n"
            "      Use the Jinja engine to apply the model's chat template\n"
            "  -n, -ngl, --ngl <value>\n"
            "      Number of GPU layers (default: %d)\n"
            "  --temp <value>\n"
            "      Temperature (default: %.1f)\n"
            "  -v, --verbose, --log-verbose\n"
            "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
            "  -h, --help\n"
            "      Show help message\n"
            "\n"
            "Commands:\n"
            "  model\n"
            "      Model is a string with an optional prefix of\n"
            "      huggingface:// (hf://), ollama://, https:// or file://.\n"
            "      If no protocol is specified and a file exists at the given\n"
            "      path, file:// is assumed; otherwise ollama:// is assumed.\n"
            "      Models being pulled are written with a .partial extension\n"
            "      and renamed to the final file name once the download\n"
            "      completes.\n"
            "\n"
            "Examples:\n"
            "  llama-run llama3\n"
            "  llama-run ollama://granite-code\n"
            "  llama-run ollama://smollm:135m\n"
            "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
            "  llama-run "
            "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
            "  llama-run https://example.com/some-file1.gguf\n"
            "  llama-run some-file2.gguf\n"
            "  llama-run file://some-file3.gguf\n"
            "  llama-run --ngl 999 some-file4.gguf\n"
            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
            context_size_default, ngl_default, temperature_default);
    }
};

struct progress_data {
    size_t file_size = 0;
    std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
    bool printed = false;
};

static int get_terminal_width() {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO csbi;
    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
    struct winsize w;
    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
    return w.ws_col;
#endif
}

#ifdef LLAMA_USE_CURL
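// RAII wrapper around a FILE * that also takes an exclusive advisory lock on
// the open file (LockFileEx on Windows, flock elsewhere), so two llama-run
// processes cannot append to the same .partial download at once. The lock and
// the handle are released in the destructor.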
class File {
  public:
    FILE * file = nullptr;

    FILE * open(const std::string & filename, const char * mode) {
        file = fopen(filename.c_str(), mode);

        return file;
    }

    int lock() {
        if (file) {
# ifdef _WIN32
            fd = _fileno(file);
            hFile = (HANDLE) _get_osfhandle(fd);
            if (hFile == INVALID_HANDLE_VALUE) {
                fd = -1;
                return 1;
            }

            OVERLAPPED overlapped = {};
            if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD,
                            &overlapped)) {
                fd = -1;
                return 1;
            }
# else
            fd = fileno(file);
            if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
                fd = -1;
                return 1;
            }
# endif
        }

        return 0;
    }

    ~File() {
        if (fd >= 0) {
# ifdef _WIN32
            if (hFile != INVALID_HANDLE_VALUE) {
                OVERLAPPED overlapped = {};
                UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped);
            }
# else
            flock(fd, LOCK_UN);
# endif
        }

        if (file) {
            fclose(file);
        }
    }

  private:
    int fd = -1;
# ifdef _WIN32
    HANDLE hFile = nullptr;
# endif
};

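// Thin wrapper around a libcurl easy handle that downloads a URL either into
// a file or into a std::string. File downloads are resumable: data is
// appended to "<output_file>.partial", CURLOPT_RESUME_FROM_LARGE skips the
// bytes already on disk, and the file is renamed only after the transfer
// succeeds.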
class HttpClient {
  public:
    int init(const std::string & url, const std::vector<std::string> & headers, const std::string & output_file,
             const bool progress, std::string * response_str = nullptr) {
        if (std::filesystem::exists(output_file)) {
            return 0;
        }

        std::string output_file_partial;
        curl = curl_easy_init();
        if (!curl) {
            return 1;
        }

        progress_data data;
        File out;
        if (!output_file.empty()) {
            output_file_partial = output_file + ".partial";
            if (!out.open(output_file_partial, "ab")) {
                printe("Failed to open file for writing\n");
                return 1;
            }

            if (out.lock()) {
                printe("Failed to exclusively lock file\n");
                return 1;
            }
        }

        set_write_options(response_str, out);
        data.file_size = set_resume_point(output_file_partial);
        set_progress_options(progress, data);
        set_headers(headers);
        CURLcode res = perform(url);
        if (res != CURLE_OK) {
            printe("Fetching resource '%s' failed: %s\n", url.c_str(), curl_easy_strerror(res));
            return 1;
        }

        if (!output_file.empty()) {
            std::filesystem::rename(output_file_partial, output_file);
        }

        return 0;
    }

    ~HttpClient() {
        if (chunk) {
            curl_slist_free_all(chunk);
        }

        if (curl) {
            curl_easy_cleanup(curl);
        }
    }

  private:
    CURL * curl = nullptr;
    struct curl_slist * chunk = nullptr;

    void set_write_options(std::string * response_str, const File & out) {
        if (response_str) {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, capture_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str);
        } else {
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file);
        }
    }

    size_t set_resume_point(const std::string & output_file) {
        size_t file_size = 0;
        if (std::filesystem::exists(output_file)) {
            file_size = std::filesystem::file_size(output_file);
            curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast<curl_off_t>(file_size));
        }

        return file_size;
    }

    void set_progress_options(bool progress, progress_data & data) {
        if (progress) {
            curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
            curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data);
            curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress);
        }
    }

    void set_headers(const std::vector<std::string> & headers) {
        if (!headers.empty()) {
            if (chunk) {
                curl_slist_free_all(chunk);
                chunk = nullptr;
            }

            for (const auto & header : headers) {
                chunk = curl_slist_append(chunk, header.c_str());
            }

            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
        }
    }

    CURLcode perform(const std::string & url) {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
        return curl_easy_perform(curl);
    }

    static std::string human_readable_time(double seconds) {
        int hrs = static_cast<int>(seconds) / 3600;
        int mins = (static_cast<int>(seconds) % 3600) / 60;
        int secs = static_cast<int>(seconds) % 60;
        if (hrs > 0) {
            return fmt("%dh %02dm %02ds", hrs, mins, secs);
        } else if (mins > 0) {
            return fmt("%dm %02ds", mins, secs);
        } else {
            return fmt("%ds", secs);
        }
    }

    static std::string human_readable_size(curl_off_t size) {
        static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
        const int length = sizeof(suffix) / sizeof(suffix[0]);
        int i = 0;
        double dbl_size = size;
        if (size > 1024) {
            for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                dbl_size = size / 1024.0;
            }
        }

        return fmt("%.2f %s", dbl_size, suffix[i]);
    }

    static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t,
                               curl_off_t) {
        progress_data * data = static_cast<progress_data *>(ptr);
        if (total_to_download <= 0) {
            return 0;
        }

        total_to_download += data->file_size;
        const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
        std::string progress_prefix = generate_progress_prefix(percentage);
        const double speed = calculate_speed(now_downloaded, data->start_time);
        const double estimated_time = (total_to_download - now_downloaded) / speed;
        std::string progress_suffix =
            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, estimated_time);
        const int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
        std::string progress_bar;
        generate_progress_bar(progress_bar_width, percentage, progress_bar);
        print_progress(progress_prefix, progress_bar, progress_suffix);
        data->printed = true;

        return 0;
    }

    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
        return (now_downloaded_plus_file_size * 100) / total_to_download;
    }

    static std::string generate_progress_prefix(curl_off_t percentage) {
        return fmt("%3ld%% |", static_cast<long int>(percentage));
    }

    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
        const auto now = std::chrono::steady_clock::now();
        const std::chrono::duration<double> elapsed_seconds = now - start_time;
        return now_downloaded / elapsed_seconds.count();
    }

    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
                                                double speed, double estimated_time) {
        const int width = 10;
        return fmt("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(), width,
                   human_readable_size(total_to_download).c_str(), width, human_readable_size(speed).c_str(), width,
                   human_readable_time(estimated_time).c_str());
    }

    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3;
        if (progress_bar_width < 1) {
            progress_bar_width = 1;
        }

        return progress_bar_width;
    }

    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
                                             std::string & progress_bar) {
        const curl_off_t pos = (percentage * progress_bar_width) / 100;
        for (int i = 0; i < progress_bar_width; ++i) {
            progress_bar.append((i < pos) ? "█" : " ");
        }

        return progress_bar;
    }

    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                               const std::string & progress_suffix) {
        printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
    }

    // Function to write data to a file
    static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        FILE * out = static_cast<FILE *>(stream);
        return fwrite(ptr, size, nmemb, out);
    }

    // Function to capture data into a string
    static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) {
        std::string * str = static_cast<std::string *>(stream);
        str->append(static_cast<char *>(ptr), size * nmemb);
        return size * nmemb;
    }
};
#endif

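// Bundles everything needed for a chat session: the model, context and
// sampler (owned through llama.cpp smart pointers) plus the chat history.
// msg_strs keeps the message strings alive in a std::list so that the raw
// const char * pointers stored in `messages` stay valid as the history grows.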
class LlamaData {
  public:
    llama_model_ptr model;
    llama_sampler_ptr sampler;
    llama_context_ptr context;
    std::vector<llama_chat_message> messages;
    std::list<std::string> msg_strs;
    std::vector<char> fmtted;

    int init(Opt & opt) {
        model = initialize_model(opt);
        if (!model) {
            return 1;
        }

        context = initialize_context(model, opt);
        if (!context) {
            return 1;
        }

        sampler = initialize_sampler(opt);

        return 0;
    }

  private:
#ifdef LLAMA_USE_CURL
    int download(const std::string & url, const std::string & output_file, const bool progress,
                 const std::vector<std::string> & headers = {}, std::string * response_str = nullptr) {
        HttpClient http;
        if (http.init(url, headers, output_file, progress, response_str)) {
            return 1;
        }

        return 0;
    }
#else
    int download(const std::string &, const std::string &, const bool, const std::vector<std::string> & = {},
                 std::string * = nullptr) {
        printe("%s: llama.cpp built without libcurl, downloading from a URL is not supported\n", __func__);

        return 1;
    }
#endif

    // Helper function to handle model tag extraction and URL construction
    std::pair<std::string, std::string> extract_model_and_tag(std::string & model, const std::string & base_url) {
        std::string model_tag = "latest";
        const size_t colon_pos = model.find(':');
        if (colon_pos != std::string::npos) {
            model_tag = model.substr(colon_pos + 1);
            model = model.substr(0, colon_pos);
        }

        std::string url = base_url + model + "/manifests/" + model_tag;

        return { model, url };
    }

    // Helper function to download and parse the manifest
    int download_and_parse_manifest(const std::string & url, const std::vector<std::string> & headers,
                                    nlohmann::json & manifest) {
        std::string manifest_str;
        int ret = download(url, "", false, headers, &manifest_str);
        if (ret) {
            return ret;
        }

        manifest = nlohmann::json::parse(manifest_str);

        return 0;
    }

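    // Download a model from Hugging Face. Accepts either "<repo>/<file>.gguf"
    // (resolved directly) or a bare "<repo>[:tag]", in which case the registry
    // manifest is fetched first to discover the GGUF file name.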
    int huggingface_dl(std::string & model, const std::string & bn) {
        // Find the second occurrence of '/' after protocol string
        size_t pos = model.find('/');
        pos = model.find('/', pos + 1);
        std::string hfr, hff;
        std::vector<std::string> headers = { "User-Agent: llama-cpp", "Accept: application/json" };
        std::string url;

        if (pos == std::string::npos) {
            auto [model_name, manifest_url] = extract_model_and_tag(model, "https://huggingface.co/v2/");
            hfr = model_name;

            nlohmann::json manifest;
            int ret = download_and_parse_manifest(manifest_url, headers, manifest);
            if (ret) {
                return ret;
            }

            hff = manifest["ggufFile"]["rfilename"];
        } else {
            hfr = model.substr(0, pos);
            hff = model.substr(pos + 1);
        }

        url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff;

        return download(url, bn, true, headers);
    }

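    // Download a model from the Ollama registry: bare names get the "library/"
    // namespace, the manifest is fetched, and the layer whose mediaType is
    // application/vnd.ollama.image.model is the GGUF blob to pull.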
    int ollama_dl(std::string & model, const std::string & bn) {
        const std::vector<std::string> headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" };
        if (model.find('/') == std::string::npos) {
            model = "library/" + model;
        }

        auto [model_name, manifest_url] = extract_model_and_tag(model, "https://registry.ollama.ai/v2/");
        nlohmann::json manifest;
        int ret = download_and_parse_manifest(manifest_url, {}, manifest);
        if (ret) {
            return ret;
        }

        std::string layer;
        for (const auto & l : manifest["layers"]) {
            if (l["mediaType"] == "application/vnd.ollama.image.model") {
                layer = l["digest"];
                break;
            }
        }

        std::string blob_url = "https://registry.ollama.ai/v2/" + model_name + "/blobs/" + layer;

        return download(blob_url, bn, true, headers);
    }

    int github_dl(const std::string & model, const std::string & bn) {
        std::string repository = model;
        std::string branch = "main";
        const size_t at_pos = model.find('@');
        if (at_pos != std::string::npos) {
            repository = model.substr(0, at_pos);
            branch = model.substr(at_pos + 1);
        }

        const std::vector<std::string> repo_parts = string_split(repository, "/");
        if (repo_parts.size() < 3) {
            printe("Invalid GitHub repository format\n");
            return 1;
        }

        const std::string & org = repo_parts[0];
        const std::string & project = repo_parts[1];
        std::string url = "https://raw.githubusercontent.com/" + org + "/" + project + "/" + branch;
        for (size_t i = 2; i < repo_parts.size(); ++i) {
            url += "/" + repo_parts[i];
        }

        return download(url, bn, true);
    }

    int s3_dl(const std::string & model, const std::string & bn) {
        const size_t slash_pos = model.find('/');
        if (slash_pos == std::string::npos) {
            return 1;
        }

        const std::string bucket = model.substr(0, slash_pos);
        const std::string key = model.substr(slash_pos + 1);
        const char * access_key = std::getenv("AWS_ACCESS_KEY_ID");
        const char * secret_key = std::getenv("AWS_SECRET_ACCESS_KEY");
        if (!access_key || !secret_key) {
            printe("AWS credentials not found in environment\n");
            return 1;
        }

        // Generate AWS Signature Version 4 headers
        // (Implementation requires HMAC-SHA256 and date handling)
        // Get current timestamp
        const time_t now = time(nullptr);
        const tm tm = *gmtime(&now);
        const std::string date = strftime_fmt("%Y%m%d", tm);
        const std::string datetime = strftime_fmt("%Y%m%dT%H%M%SZ", tm);
        const std::vector<std::string> headers = {
            "Authorization: AWS4-HMAC-SHA256 Credential=" + std::string(access_key) + "/" + date +
                "/us-east-1/s3/aws4_request",
            "x-amz-content-sha256: UNSIGNED-PAYLOAD", "x-amz-date: " + datetime
        };

        const std::string url = "https://" + bucket + ".s3.amazonaws.com/" + key;

        return download(url, bn, true, headers);
    }

    std::string basename(const std::string & path) {
        const size_t pos = path.find_last_of("/\\");
        if (pos == std::string::npos) {
            return path;
        }

        return path.substr(pos + 1);
    }

    int rm_until_substring(std::string & model_, const std::string & substring) {
        const std::string::size_type pos = model_.find(substring);
        if (pos == std::string::npos) {
            return 1;
        }

        model_ = model_.substr(pos + substring.size());  // Skip past the substring

        return 0;
    }

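    // Dispatch on the model string's scheme (file://, hf://, http(s)://,
    // github:, s3:// or ollama://; no scheme falls back to a local file if one
    // exists, otherwise ollama) and download to the basename of the path when
    // needed. On return, model_ holds the local file name.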
    int resolve_model(std::string & model_) {
        int ret = 0;
        if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) {
            rm_until_substring(model_, "://");

            return ret;
        }

        const std::string bn = basename(model_);
        if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://") ||
            string_starts_with(model_, "hf.co/")) {
            rm_until_substring(model_, "hf.co/");
            rm_until_substring(model_, "://");
            ret = huggingface_dl(model_, bn);
        } else if ((string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) &&
                   !string_starts_with(model_, "https://ollama.com/library/")) {
            ret = download(model_, bn, true);
        } else if (string_starts_with(model_, "github:") || string_starts_with(model_, "github://")) {
            rm_until_substring(model_, "github:");
            rm_until_substring(model_, "://");
            ret = github_dl(model_, bn);
        } else if (string_starts_with(model_, "s3://")) {
            rm_until_substring(model_, "://");
            ret = s3_dl(model_, bn);
        } else {  // ollama:// or nothing
            rm_until_substring(model_, "ollama.com/library/");
            rm_until_substring(model_, "://");
            ret = ollama_dl(model_, bn);
        }

        model_ = bn;

        return ret;
    }

    // Initializes the model and returns a unique pointer to it
    llama_model_ptr initialize_model(Opt & opt) {
        ggml_backend_load_all();
        resolve_model(opt.model_);
        printe("\r" LOG_CLR_TO_EOL "Loading model");
        llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
        if (!model) {
            printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
        }

        printe("\r" LOG_CLR_TO_EOL);

        return model;
    }

    // Initializes the context with the specified parameters
    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
        llama_context_ptr context(llama_init_from_model(model.get(), opt.ctx_params));
        if (!context) {
            printe("%s: error: failed to create the llama_context\n", __func__);
        }

        return context;
    }

    // Initializes and configures the sampler
    llama_sampler_ptr initialize_sampler(const Opt & opt) {
        llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature));
        llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

        return sampler;
    }
};

// Add a message to `messages` and store its content in `msg_strs`
static void add_message(const char * role, const std::string & text, LlamaData & llama_data) {
    llama_data.msg_strs.push_back(text);  // copy into the list; msg_strs owns the storage
    llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() });
}

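// Function to apply the chat template and resize `fmtted` if needed. With
// --jinja the messages are rendered directly through minja; otherwise
// llama_chat_apply_template is called twice: the first call reports the
// required buffer size, `fmtted` is grown, and the second call writes the
// rendered prompt.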
static int apply_chat_template(const common_chat_template & tmpl, LlamaData & llama_data, const bool append,
                               bool use_jinja) {
    if (use_jinja) {
        nlohmann::json messages = nlohmann::json::array();
        for (const auto & msg : llama_data.messages) {
            messages.push_back({
                { "role", msg.role },
                { "content", msg.content },
            });
        }

        try {
            minja::chat_template_inputs tmpl_inputs;
            tmpl_inputs.messages = messages;
            tmpl_inputs.add_generation_prompt = append;

            minja::chat_template_options tmpl_opts;
            tmpl_opts.use_bos_token = false;
            tmpl_opts.use_eos_token = false;

            auto result = tmpl.apply(tmpl_inputs, tmpl_opts);
            llama_data.fmtted.resize(result.size() + 1);
            memcpy(llama_data.fmtted.data(), result.c_str(), result.size() + 1);
            return result.size();
        } catch (const std::exception & e) {
            printe("failed to render the chat template: %s\n", e.what());
            return -1;
        }
    }

    int result = llama_chat_apply_template(
        tmpl.source().c_str(), llama_data.messages.data(), llama_data.messages.size(), append,
        append ? llama_data.fmtted.data() : nullptr, append ? llama_data.fmtted.size() : 0);
    if (append && result > static_cast<int>(llama_data.fmtted.size())) {
        llama_data.fmtted.resize(result);
        result = llama_chat_apply_template(tmpl.source().c_str(), llama_data.messages.data(),
                                           llama_data.messages.size(), append, llama_data.fmtted.data(),
                                           llama_data.fmtted.size());
    }

    return result;
}

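// Function to tokenize the prompt. llama_tokenize returns the negated token
// count when given a NULL output buffer, so the first call sizes the vector
// and the second call fills it.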
static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                           std::vector<llama_token> & prompt_tokens, const LlamaData & llama_data) {
    const bool is_first = llama_get_kv_cache_used_cells(llama_data.context.get()) == 0;
    const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
    prompt_tokens.resize(n_prompt_tokens);
    if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), is_first,
                       true) < 0) {
        printe("failed to tokenize the prompt\n");
        return -1;
    }

    return n_prompt_tokens;
}

// Check if we have enough space in the context to evaluate this batch
static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
    const int n_ctx = llama_n_ctx(ctx.get());
    const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get());
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        printf(LOG_COL_DEFAULT "\n");
        printe("context size exceeded\n");
        return 1;
    }

    return 0;
}

// convert the token to a string
static int convert_token_to_string(const llama_vocab * vocab, const llama_token token_id, std::string & piece) {
    char buf[256];
    int n = llama_token_to_piece(vocab, token_id, buf, sizeof(buf), 0, true);
    if (n < 0) {
        printe("failed to convert token to piece\n");
        return 1;
    }

    piece = std::string(buf, n);
    return 0;
}

static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) {
    printf("%s", piece.c_str());
    fflush(stdout);
    response += piece;
}

// helper function to evaluate a prompt and generate a response
static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
    const llama_vocab * vocab = llama_model_get_vocab(llama_data.model.get());
    std::vector<llama_token> tokens;
    if (tokenize_prompt(vocab, prompt, tokens, llama_data) < 0) {
        return 1;
    }

    // prepare a batch for the prompt
    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
    llama_token new_token_id;
    while (true) {
        // abort instead of decoding past a full context
        if (check_context_size(llama_data.context, batch)) {
            return 1;
        }

        if (llama_decode(llama_data.context.get(), batch)) {
            printe("failed to decode\n");
            return 1;
        }

        // sample the next token and check whether it ends the generation
        new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1);
        if (llama_vocab_is_eog(vocab, new_token_id)) {
            break;
        }

        std::string piece;
        if (convert_token_to_string(vocab, new_token_id, piece)) {
            return 1;
        }

        print_word_and_concatenate_to_response(piece, response);

        // prepare the next batch with the sampled token
        batch = llama_batch_get_one(&new_token_id, 1);
    }

    printf(LOG_COL_DEFAULT);
    return 0;
}

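// Read one line of user input: linenoise (with history) on POSIX,
// std::getline on Windows. Returns 0 on input, 1 to end the session
// (EOF or "/bye"), and 2 when the line is empty and should be re-prompted.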
static int read_user_input(std::string & user_input) {
    static const char * prompt_prefix = "> ";
#ifdef WIN32
    printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);

    std::getline(std::cin, user_input);
    if (std::cin.eof()) {
        printf("\n");
        return 1;
    }
#else
    std::unique_ptr<char, decltype(&std::free)> line(const_cast<char *>(linenoise(prompt_prefix)), free);
    if (!line) {
        return 1;
    }

    user_input = line.get();
#endif

    if (user_input == "/bye") {
        return 1;
    }

    if (user_input.empty()) {
        return 2;
    }

#ifndef WIN32
    linenoiseHistoryAdd(line.get());
#endif

    return 0;  // Should have data in happy path
}

// Function to generate a response based on the prompt
static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response,
                             const bool stdout_a_terminal) {
    // Set response color
    if (stdout_a_terminal) {
        printf(LOG_COL_YELLOW);
    }

    if (generate(llama_data, prompt, response)) {
        printe("failed to generate response\n");
        return 1;
    }

    // End response with color reset and newline
    printf("\n%s", stdout_a_terminal ? LOG_COL_DEFAULT : "");
    return 0;
}

// Helper function to apply the chat template and handle errors
static int apply_chat_template_with_error_handling(const common_chat_template & tmpl, LlamaData & llama_data,
                                                   const bool append, int & output_length, bool use_jinja) {
    const int new_len = apply_chat_template(tmpl, llama_data, append, use_jinja);
    if (new_len < 0) {
        printe("failed to apply the chat template\n");
        return -1;
    }

    output_length = new_len;
    return 0;
}

// Helper function to handle user input
static int handle_user_input(std::string & user_input, const std::string & user) {
    if (!user.empty()) {
        user_input = user;
        return 0;  // No need for interactive input
    }

    return read_user_input(user_input);  // 1 ends the loop, 2 requests a re-prompt
}

static bool is_stdin_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdin, &mode);
#else
    return isatty(STDIN_FILENO);
#endif
}

static bool is_stdout_a_terminal() {
#if defined(_WIN32)
    HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
    DWORD mode;
    return GetConsoleMode(hStdout, &mode);
#else
    return isatty(STDOUT_FILENO);
#endif
}

// Function to handle user input
static int get_user_input(std::string & user_input, const std::string & user) {
    while (true) {
        const int ret = handle_user_input(user_input, user);
        if (ret == 1) {
            return 1;
        }

        if (ret == 2) {
            continue;
        }

        break;
    }

    return 0;
}

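// Main chat loop. The chat template is applied to the full history twice per
// turn: once with the generation prompt appended (new_len) and once without
// it after the assistant reply is added (prev_len); the prompt passed to
// generate_response() is the slice of `fmtted` between prev_len and new_len,
// i.e. only the new portion of the conversation.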
static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_jinja) {
    int prev_len = 0;
    llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
    auto chat_templates = common_chat_templates_from_model(llama_data.model.get(), "");
    GGML_ASSERT(chat_templates.template_default);
    static const bool stdout_a_terminal = is_stdout_a_terminal();
    while (true) {
        // Get user input
        std::string user_input;
        if (get_user_input(user_input, user) == 1) {
            return 0;
        }

        add_message("user", user.empty() ? user_input : user, llama_data);
        int new_len;
        if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, true, new_len,
                                                    use_jinja) < 0) {
            return 1;
        }

        std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len);
        std::string response;
        if (generate_response(llama_data, prompt, response, stdout_a_terminal)) {
            return 1;
        }

        if (!user.empty()) {
            break;
        }

        add_message("assistant", response, llama_data);
        if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, false, prev_len,
                                                    use_jinja) < 0) {
            return 1;
        }
    }

    return 0;
}

static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
    const Opt * opt = static_cast<Opt *>(p);
    if (opt->verbose || level == GGML_LOG_LEVEL_ERROR) {
        printe("%s", text);
    }
}

static std::string read_pipe_data() {
    std::ostringstream result;
    result << std::cin.rdbuf();  // Read all data from std::cin

    return result.str();
}

static void ctrl_c_handling() {
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = sigint_handler;
    sigemptyset(&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined(_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
}

int main(int argc, const char ** argv) {
    ctrl_c_handling();
    Opt opt;
    const int ret = opt.init(argc, argv);
    if (ret == 2) {
        return 0;
    } else if (ret) {
        return 1;
    }

    if (!is_stdin_a_terminal()) {
        if (!opt.user.empty()) {
            opt.user += "\n\n";
        }

        opt.user += read_pipe_data();
    }

    llama_log_set(log_callback, &opt);
    LlamaData llama_data;
    if (llama_data.init(opt)) {
        return 1;
    }

    if (chat_loop(llama_data, opt.user, opt.use_jinja)) {
        return 1;
    }

    return 0;
}