// common.cpp
#if defined(_MSC_VER)
#define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
#endif

#include "ggml.h"
#include "gguf.h"

#include "common.h"
#include "log.h"
// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
#include "json-schema-to-grammar.h"
#include "llama.h"

#include <algorithm>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <codecvt>
#include <cstdarg>
#include <cstring>
#include <ctime>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <iterator>
#include <regex>
#include <sstream>
#include <string>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#if defined(__APPLE__) && defined(__MACH__)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#    define NOMINMAX
#endif
#include <locale>
#include <windows.h>
#include <fcntl.h>
#include <io.h>
#else
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#endif

#if defined(LLAMA_USE_CURL)
#include <curl/curl.h>
#include <curl/easy.h>
#include <future>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#if defined(LLAMA_USE_CURL)
#ifdef __linux__
#include <linux/limits.h>
#elif defined(_WIN32)
#   if !defined(PATH_MAX)
#       define PATH_MAX MAX_PATH
#   endif
#else
#include <sys/syslimits.h>
#endif
#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083

//
// CURL utils
//

using curl_ptr = std::unique_ptr<CURL, decltype(&curl_easy_cleanup)>;

// cannot use unique_ptr for curl_slist, because we cannot update without destroying the old one
struct curl_slist_ptr {
    struct curl_slist * ptr = nullptr;
    ~curl_slist_ptr() {
        if (ptr) {
            curl_slist_free_all(ptr);
        }
    }
};
#endif // LLAMA_USE_CURL

using json = nlohmann::ordered_json;

//
// CPU utils
//

int32_t cpu_get_num_physical_cores() {
#ifdef __linux__
    // enumerate the set of thread siblings, num entries is num cores
    std::unordered_set<std::string> siblings;
    for (uint32_t cpu = 0; cpu < UINT32_MAX; ++cpu) {
        std::ifstream thread_siblings("/sys/devices/system/cpu/cpu"
            + std::to_string(cpu) + "/topology/thread_siblings");
        if (!thread_siblings.is_open()) {
            break; // no more cpus
        }
        std::string line;
        if (std::getline(thread_siblings, line)) {
            siblings.insert(line);
        }
    }
    if (!siblings.empty()) {
        return static_cast<int32_t>(siblings.size());
    }
#elif defined(__APPLE__) && defined(__MACH__)
    int32_t num_physical_cores;
    size_t len = sizeof(num_physical_cores);
    int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0);
    if (result == 0) {
        return num_physical_cores;
    }
    result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0);
    if (result == 0) {
        return num_physical_cores;
    }
#elif defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
    // TODO: windows + arm64 + mingw64
    unsigned int n_threads_win = std::thread::hardware_concurrency();
    unsigned int default_threads = n_threads_win > 0 ? (n_threads_win <= 4 ? n_threads_win : n_threads_win / 2) : 4;

    DWORD buffer_size = 0;
    if (!GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &buffer_size)) {
        if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
            return default_threads;
        }
    }

    std::vector<char> buffer(buffer_size);
    if (!GetLogicalProcessorInformationEx(RelationProcessorCore, reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data()), &buffer_size)) {
        return default_threads;
    }

    int32_t num_physical_cores = 0;
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data());
    while (buffer_size > 0) {
        if (info->Relationship == RelationProcessorCore) {
            num_physical_cores += info->Processor.GroupCount;
        }
        buffer_size -= info->Size;
        info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(reinterpret_cast<char *>(info) + info->Size);
    }
    return num_physical_cores > 0 ? num_physical_cores : default_threads;
#endif
    unsigned int n_threads = std::thread::hardware_concurrency();
    return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
}
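
// Note: when no platform-specific path applies above, the fallback maps
// std::thread::hardware_concurrency() to a default thread count:
//   16 -> 8, 4 -> 4, 0 (unknown) -> 4.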
#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
#include <pthread.h>

static void cpuid(unsigned leaf, unsigned subleaf,
                  unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx) {
    __asm__("movq\t%%rbx,%%rsi\n\t"
            "cpuid\n\t"
            "xchgq\t%%rbx,%%rsi"
            : "=a"(*eax), "=S"(*ebx), "=c"(*ecx), "=d"(*edx)
            : "0"(leaf), "2"(subleaf));
}

static int pin_cpu(int cpu) {
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(cpu, &mask);
    return pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
}

static bool is_hybrid_cpu(void) {
    unsigned eax, ebx, ecx, edx;
    cpuid(7, 0, &eax, &ebx, &ecx, &edx);
    return !!(edx & (1u << 15));
}

static bool is_running_on_efficiency_core(void) {
    unsigned eax, ebx, ecx, edx;
    cpuid(0x1a, 0, &eax, &ebx, &ecx, &edx);
    int intel_atom = 0x20;
    int core_type = (eax & 0xff000000u) >> 24;
    return core_type == intel_atom;
}

static int cpu_count_math_cpus(int n_cpu) {
    int result = 0;
    for (int cpu = 0; cpu < n_cpu; ++cpu) {
        if (pin_cpu(cpu)) {
            return -1;
        }
        if (is_running_on_efficiency_core()) {
            continue; // efficiency cores harm lockstep threading
        }
        ++cpu; // hyperthreading isn't useful for linear algebra
        ++result;
    }
    return result;
}
#endif // __x86_64__ && __linux__

/**
 * Returns number of CPUs on system that are useful for math.
 */
int32_t cpu_get_num_math() {
#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
    int n_cpu = sysconf(_SC_NPROCESSORS_ONLN);
    if (n_cpu < 1) {
        return cpu_get_num_physical_cores();
    }
    if (is_hybrid_cpu()) {
        cpu_set_t affinity;
        if (!pthread_getaffinity_np(pthread_self(), sizeof(affinity), &affinity)) {
            int result = cpu_count_math_cpus(n_cpu);
            pthread_setaffinity_np(pthread_self(), sizeof(affinity), &affinity);
            if (result > 0) {
                return result;
            }
        }
    }
#endif
    return cpu_get_num_physical_cores();
}
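
// Note (illustrative, assuming hyperthread siblings have adjacent CPU
// indices): on hybrid x86 Linux systems, cpu_count_math_cpus() pins the
// calling thread to each CPU in turn, skips efficiency cores (CPUID leaf
// 0x1a, core type 0x20), and steps over the next CPU index so that only one
// thread per performance core contributes to the count; the caller's
// original affinity mask is restored afterwards.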
// Helper for setting process priority

#if defined(_WIN32)

bool set_process_priority(enum ggml_sched_priority prio) {
    if (prio == GGML_SCHED_PRIO_NORMAL) {
        return true;
    }

    DWORD p = NORMAL_PRIORITY_CLASS;
    switch (prio) {
        case GGML_SCHED_PRIO_NORMAL:   p = NORMAL_PRIORITY_CLASS;       break;
        case GGML_SCHED_PRIO_MEDIUM:   p = ABOVE_NORMAL_PRIORITY_CLASS; break;
        case GGML_SCHED_PRIO_HIGH:     p = HIGH_PRIORITY_CLASS;         break;
        case GGML_SCHED_PRIO_REALTIME: p = REALTIME_PRIORITY_CLASS;     break;
    }

    if (!SetPriorityClass(GetCurrentProcess(), p)) {
        LOG_WRN("failed to set process priority class %d : (%d)\n", prio, (int) GetLastError());
        return false;
    }

    return true;
}

#else // MacOS and POSIX
#include <sys/types.h>
#include <sys/resource.h>

bool set_process_priority(enum ggml_sched_priority prio) {
    if (prio == GGML_SCHED_PRIO_NORMAL) {
        return true;
    }

    int p = 0;
    switch (prio) {
        case GGML_SCHED_PRIO_NORMAL:   p =   0;  break;
        case GGML_SCHED_PRIO_MEDIUM:   p =  -5;  break;
        case GGML_SCHED_PRIO_HIGH:     p = -10;  break;
        case GGML_SCHED_PRIO_REALTIME: p = -20;  break;
    }

    // setpriority() returns 0 on success and -1 on error, so compare against
    // 0 instead of negating the result (the negated form would report
    // failure on success and vice versa)
    if (setpriority(PRIO_PROCESS, 0, p) != 0) {
        LOG_WRN("failed to set process priority %d : %s (%d)\n", prio, strerror(errno), errno);
        return false;
    }
    return true;
}

#endif
//
// CLI argument parsing
//

void postprocess_cpu_params(cpu_params & cpuparams, const cpu_params * role_model) {
    int32_t n_set = 0;

    if (cpuparams.n_threads < 0) {
        // Assuming everything about cpuparams is invalid
        if (role_model != nullptr) {
            cpuparams = *role_model;
        } else {
            cpuparams.n_threads = cpu_get_num_math();
        }
    }

    for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) {
        if (cpuparams.cpumask[i]) {
            n_set++;
        }
    }

    if (n_set && n_set < cpuparams.n_threads) {
        // Not enough set bits, may experience performance issues.
        LOG_WRN("Not enough set bits in CPU mask (%d) to satisfy requested thread count: %d\n", n_set, cpuparams.n_threads);
    }
}

bool parse_cpu_range(const std::string & range, bool (&boolmask)[GGML_MAX_N_THREADS]) {
    size_t dash_loc = range.find('-');
    if (dash_loc == std::string::npos) {
        LOG_ERR("Format of CPU range is invalid! Expected [<start>]-[<end>].\n");
        return false;
    }

    size_t start_i;
    size_t end_i;

    if (dash_loc == 0) {
        start_i = 0;
    } else {
        start_i = std::stoull(range.substr(0, dash_loc));
        if (start_i >= GGML_MAX_N_THREADS) {
            LOG_ERR("Start index out of bounds!\n");
            return false;
        }
    }

    if (dash_loc == range.length() - 1) {
        end_i = GGML_MAX_N_THREADS - 1;
    } else {
        end_i = std::stoull(range.substr(dash_loc + 1));
        if (end_i >= GGML_MAX_N_THREADS) {
            LOG_ERR("End index out of bounds!\n");
            return false;
        }
    }

    for (size_t i = start_i; i <= end_i; i++) {
        boolmask[i] = true;
    }

    return true;
}
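
// Example (illustrative): "0-3" marks CPUs 0..3 in the mask, "-7" marks
// CPUs 0..7, and "4-" marks CPU 4 through GGML_MAX_N_THREADS - 1.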
bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREADS]) {
    // Discard potential 0x prefix
    size_t start_i = 0;
    if (mask.length() >= 2 && mask.substr(0, 2) == "0x") {
        start_i = 2;
    }

    size_t num_digits = mask.length() - start_i;
    if (num_digits > 128) num_digits = 128;

    size_t end_i = num_digits + start_i;

    for (size_t i = start_i, n = (num_digits*4 - 1); i < end_i; i++, n-=4) {
        char c = mask.at(i);
        int8_t id = c;

        if ((c >= '0' && c <= '9')) {
            id -= '0';
        } else if (c >= 'a' && c <= 'f') {
            id -= 'a' - 10;
        } else if (c >= 'A' && c <= 'F') {
            id -= 'A' - 10;
        } else {
            LOG_ERR("Invalid hex character '%c' at position %d\n", c, int32_t(i));
            return false;
        }

        boolmask[  n  ] = boolmask[  n  ] || ((id & 8) != 0);
        boolmask[n - 1] = boolmask[n - 1] || ((id & 4) != 0);
        boolmask[n - 2] = boolmask[n - 2] || ((id & 2) != 0);
        boolmask[n - 3] = boolmask[n - 3] || ((id & 1) != 0);
    }

    return true;
}
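
// Example (illustrative): the mask is parsed as big-endian hex, one nibble
// per four CPUs: "0x5" sets CPUs 0 and 2, "0xFF" sets CPUs 0..7.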
void common_init() {
    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
        if (LOG_DEFAULT_LLAMA <= common_log_verbosity_thold) {
            common_log_add(common_log_main(), level, "%s", text);
        }
    }, NULL);

#ifdef NDEBUG
    const char * build_type = "";
#else
    const char * build_type = " (debug)";
#endif

    LOG_INF("build: %d (%s) with %s for %s%s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET, build_type);
}

std::string common_params_get_system_info(const common_params & params) {
    std::ostringstream os;

    os << "system_info: n_threads = " << params.cpuparams.n_threads;
    if (params.cpuparams_batch.n_threads != -1) {
        os << " (n_threads_batch = " << params.cpuparams_batch.n_threads << ")";
    }
#if defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
    // TODO: windows + arm64 + mingw64
    DWORD logicalProcessorCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
    os << " / " << logicalProcessorCount << " | " << llama_print_system_info();
#else
    os << " / " << std::thread::hardware_concurrency() << " | " << llama_print_system_info();
#endif

    return os.str();
}

//
// String utils
//

std::string string_format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}

std::string string_strip(const std::string & str) {
    size_t start = 0;
    size_t end = str.size();
    while (start < end && std::isspace(str[start])) {
        start++;
    }
    while (end > start && std::isspace(str[end - 1])) {
        end--;
    }
    return str.substr(start, end - start);
}

std::string string_get_sortable_timestamp() {
    using clock = std::chrono::system_clock;

    const clock::time_point current_time = clock::now();
    const time_t as_time_t = clock::to_time_t(current_time);
    char timestamp_no_ns[100];
    std::strftime(timestamp_no_ns, 100, "%Y_%m_%d-%H_%M_%S", std::localtime(&as_time_t));

    const int64_t ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
        current_time.time_since_epoch() % 1000000000).count();
    char timestamp_ns[11];
    snprintf(timestamp_ns, 11, "%09" PRId64, ns);

    return std::string(timestamp_no_ns) + "." + std::string(timestamp_ns);
}
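
// Example (illustrative): the returned string sorts lexicographically in
// chronological order, e.g. "2025_01_31-14_07_02.123456789".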
void string_replace_all(std::string & s, const std::string & search, const std::string & replace) {
    if (search.empty()) {
        return;
    }
    std::string builder;
    builder.reserve(s.length());
    size_t pos = 0;
    size_t last_pos = 0;
    while ((pos = s.find(search, last_pos)) != std::string::npos) {
        builder.append(s, last_pos, pos - last_pos);
        builder.append(replace);
        last_pos = pos + search.length();
    }
    builder.append(s, last_pos, std::string::npos);
    s = std::move(builder);
}

std::string string_from(bool value) {
    return value ? "true" : "false";
}

std::string string_from(const std::vector<int> & values) {
    std::stringstream buf;

    buf << "[ ";
    bool first = true;
    for (auto e : values) {
        if (first) {
            first = false;
        } else {
            buf << ", ";
        }
        buf << std::to_string(e);
    }
    buf << " ]";

    return buf.str();
}

std::string string_from(const struct llama_context * ctx, const std::vector<llama_token> & tokens) {
    std::stringstream buf;

    buf << "[ ";
    bool first = true;
    for (const auto & token : tokens) {
        if (!first) {
            buf << ", ";
        } else {
            first = false;
        }

        auto detokenized = common_token_to_piece(ctx, token);

        detokenized.erase(
            std::remove_if(
                detokenized.begin(),
                detokenized.end(),
                [](const unsigned char c) { return !std::isprint(c); }),
            detokenized.end());

        buf << "'" << detokenized << "'"
            << ":" << std::to_string(token);
    }
    buf << " ]";

    return buf.str();
}

std::string string_from(const struct llama_context * ctx, const struct llama_batch & batch) {
    std::stringstream buf;

    buf << "[ ";
    bool first = true;
    for (int i = 0; i < batch.n_tokens; ++i) {
        if (!first) {
            buf << ", ";
        } else {
            first = false;
        }

        auto detokenized = common_token_to_piece(ctx, batch.token[i]);

        detokenized.erase(
            std::remove_if(
                detokenized.begin(),
                detokenized.end(),
                [](const unsigned char c) { return !std::isprint(c); }),
            detokenized.end());

        buf << "\n" << std::to_string(i)
            << ", token '" << detokenized << "'"
            << ", pos " << std::to_string(batch.pos[i])
            << ", n_seq_id " << std::to_string(batch.n_seq_id[i])
            << ", seq_id " << std::to_string(batch.seq_id[i][0])
            << ", logits " << std::to_string(batch.logits[i]);
    }
    buf << " ]";

    return buf.str();
}

void string_process_escapes(std::string & input) {
    std::size_t input_len = input.length();
    std::size_t output_idx = 0;

    for (std::size_t input_idx = 0; input_idx < input_len; ++input_idx) {
        if (input[input_idx] == '\\' && input_idx + 1 < input_len) {
            switch (input[++input_idx]) {
                case 'n':  input[output_idx++] = '\n'; break;
                case 'r':  input[output_idx++] = '\r'; break;
                case 't':  input[output_idx++] = '\t'; break;
                case '\'': input[output_idx++] = '\''; break;
                case '\"': input[output_idx++] = '\"'; break;
                case '\\': input[output_idx++] = '\\'; break;
                case 'x':
                    // Handle \x12, etc
                    if (input_idx + 2 < input_len) {
                        const char x[3] = { input[input_idx + 1], input[input_idx + 2], 0 };
                        char * err_p = nullptr;
                        const long val = std::strtol(x, &err_p, 16);
                        if (err_p == x + 2) {
                            input_idx += 2;
                            input[output_idx++] = char(val);
                            break;
                        }
                    }
                    // fall through
                default:   input[output_idx++] = '\\';
                           input[output_idx++] = input[input_idx]; break;
            }
        } else {
            input[output_idx++] = input[input_idx];
        }
    }

    input.resize(output_idx);
}
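
// Example (illustrative): escapes are rewritten in place, so the 8-character
// literal "a\tb\x21" (backslashes included) becomes "a", TAB, "b", "!" and
// the string is shrunk to 4 characters.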
bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
    const char * sep = strchr(data, '=');
    if (sep == nullptr || sep - data >= 128) {
        LOG_ERR("%s: malformed KV override '%s'\n", __func__, data);
        return false;
    }
    llama_model_kv_override kvo;
    std::strncpy(kvo.key, data, sep - data);
    kvo.key[sep - data] = 0;
    sep++;
    if (strncmp(sep, "int:", 4) == 0) {
        sep += 4;
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
        kvo.val_i64 = std::atol(sep);
    } else if (strncmp(sep, "float:", 6) == 0) {
        sep += 6;
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
        kvo.val_f64 = std::atof(sep);
    } else if (strncmp(sep, "bool:", 5) == 0) {
        sep += 5;
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
        if (std::strcmp(sep, "true") == 0) {
            kvo.val_bool = true;
        } else if (std::strcmp(sep, "false") == 0) {
            kvo.val_bool = false;
        } else {
            LOG_ERR("%s: invalid boolean value for KV override '%s'\n", __func__, data);
            return false;
        }
    } else if (strncmp(sep, "str:", 4) == 0) {
        sep += 4;
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
        if (strlen(sep) > 127) {
            LOG_ERR("%s: malformed KV override '%s', value cannot exceed 127 chars\n", __func__, data);
            return false;
        }
        strncpy(kvo.val_str, sep, 127);
        kvo.val_str[127] = '\0';
    } else {
        LOG_ERR("%s: invalid type for KV override '%s'\n", __func__, data);
        return false;
    }
    overrides.emplace_back(std::move(kvo));
    return true;
}
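
// Example (illustrative): accepted override strings have the form
// <key>=<type>:<value>, e.g.
//   "tokenizer.ggml.add_bos_token=bool:false"
//   "some.key=int:42", "some.key=float:1.5", "some.key=str:hello"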
//
// Filesystem utils
//

// Validate if a filename is safe to use
// To validate a full path, split the path by the OS-specific path separator, and validate each part with this function
bool fs_validate_filename(const std::string & filename) {
    if (!filename.length()) {
        // Empty filename invalid
        return false;
    }
    if (filename.length() > 255) {
        // Limit at common largest possible filename on Linux filesystems
        // to avoid unnecessary further validation
        // (On systems with smaller limits it will be caught by the OS)
        return false;
    }

    std::u32string filename_utf32;
    try {
#if defined(__clang__)
        // disable C++17 deprecation warning for std::codecvt_utf8
#       pragma clang diagnostic push
#       pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif
        std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter;

#if defined(__clang__)
#       pragma clang diagnostic pop
#endif

        filename_utf32 = converter.from_bytes(filename);

        // If the reverse conversion mismatches, it means overlong UTF-8 sequences were used,
        // or invalid encodings were encountered. Reject such attempts
        std::string filename_reencoded = converter.to_bytes(filename_utf32);
        if (filename_reencoded != filename) {
            return false;
        }
    } catch (const std::exception &) {
        return false;
    }

    // Check for forbidden codepoints:
    // - Control characters
    // - Unicode equivalents of illegal characters
    // - UTF-16 surrogate pairs
    // - UTF-8 replacement character
    // - Byte order mark (BOM)
    // - Illegal characters: / \ : * ? " < > |
    for (char32_t c : filename_utf32) {
        if (c <= 0x1F // Control characters (C0)
            || c == 0x7F // Control characters (DEL)
            || (c >= 0x80 && c <= 0x9F) // Control characters (C1)
            || c == 0xFF0E // Fullwidth Full Stop (period equivalent)
            || c == 0x2215 // Division Slash (forward slash equivalent)
            || c == 0x2216 // Set Minus (backslash equivalent)
            || (c >= 0xD800 && c <= 0xDFFF) // UTF-16 surrogate pairs
            || c == 0xFFFD // Replacement Character (UTF-8)
            || c == 0xFEFF // Byte Order Mark (BOM)
            || c == '/' || c == '\\' || c == ':' || c == '*' // Illegal characters
            || c == '?' || c == '"' || c == '<' || c == '>' || c == '|') {
            return false;
        }
    }

    // Reject any leading or trailing ' ', or any trailing '.', these are stripped on Windows and will cause a different filename
    // Unicode and other whitespace is not affected, only 0x20 space
    if (filename.front() == ' ' || filename.back() == ' ' || filename.back() == '.') {
        return false;
    }

    // Reject any ".." (currently stricter than necessary, it should be fine to just check for == ".." instead)
    if (filename.find("..") != std::string::npos) {
        return false;
    }

    // Reject "."
    if (filename == ".") {
        return false;
    }

    return true;
}
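
// Example (illustrative):
//   fs_validate_filename("model.gguf") -> true
//   fs_validate_filename("..")         -> false (contains "..")
//   fs_validate_filename("name.")      -> false (trailing '.')
//   fs_validate_filename("a:b")        -> false (illegal character ':')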
// returns true if successful, false otherwise
bool fs_create_directory_with_parents(const std::string & path) {
#ifdef _WIN32
    std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
    std::wstring wpath = converter.from_bytes(path);

    // if the path already exists, check whether it's a directory
    const DWORD attributes = GetFileAttributesW(wpath.c_str());
    if ((attributes != INVALID_FILE_ATTRIBUTES) && (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
        return true;
    }

    size_t pos_slash = 0;

    // process path from front to back, procedurally creating directories
    while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) {
        const std::wstring subpath = wpath.substr(0, pos_slash);
        const wchar_t * test = subpath.c_str();

        const bool success = CreateDirectoryW(test, NULL);
        if (!success) {
            const DWORD error = GetLastError();

            // if the path already exists, ensure that it's a directory
            if (error == ERROR_ALREADY_EXISTS) {
                const DWORD attributes = GetFileAttributesW(subpath.c_str());
                if (attributes == INVALID_FILE_ATTRIBUTES || !(attributes & FILE_ATTRIBUTE_DIRECTORY)) {
                    return false;
                }
            } else {
                return false;
            }
        }

        pos_slash += 1;
    }

    return true;
#else
    // if the path already exists, check whether it's a directory
    struct stat info;
    if (stat(path.c_str(), &info) == 0) {
        return S_ISDIR(info.st_mode);
    }

    size_t pos_slash = 1; // skip leading slashes for directory creation

    // process path from front to back, procedurally creating directories
    while ((pos_slash = path.find('/', pos_slash)) != std::string::npos) {
        const std::string subpath = path.substr(0, pos_slash);
        struct stat info;

        // if the path already exists, ensure that it's a directory
        if (stat(subpath.c_str(), &info) == 0) {
            if (!S_ISDIR(info.st_mode)) {
                return false;
            }
        } else {
            // create parent directories
            const int ret = mkdir(subpath.c_str(), 0755);
            if (ret != 0) {
                return false;
            }
        }

        pos_slash += 1;
    }

    return true;
#endif // _WIN32
}

std::string fs_get_cache_directory() {
    std::string cache_directory = "";
    auto ensure_trailing_slash = [](std::string p) {
        // Make sure to add trailing slash
        if (p.back() != DIRECTORY_SEPARATOR) {
            p += DIRECTORY_SEPARATOR;
        }
        return p;
    };
    if (getenv("LLAMA_CACHE")) {
        cache_directory = std::getenv("LLAMA_CACHE");
    } else {
#ifdef __linux__
        if (std::getenv("XDG_CACHE_HOME")) {
            cache_directory = std::getenv("XDG_CACHE_HOME");
        } else {
            cache_directory = std::getenv("HOME") + std::string("/.cache/");
        }
#elif defined(__APPLE__)
        cache_directory = std::getenv("HOME") + std::string("/Library/Caches/");
#elif defined(_WIN32)
        cache_directory = std::getenv("LOCALAPPDATA");
#endif // __linux__
        cache_directory = ensure_trailing_slash(cache_directory);
        cache_directory += "llama.cpp";
    }
    return ensure_trailing_slash(cache_directory);
}
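
// Note: the lookup order above is LLAMA_CACHE first, then the platform
// default; e.g. on Linux without LLAMA_CACHE this resolves to
// "$XDG_CACHE_HOME/llama.cpp/" or "$HOME/.cache/llama.cpp/".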
std::string fs_get_cache_file(const std::string & filename) {
    GGML_ASSERT(filename.find(DIRECTORY_SEPARATOR) == std::string::npos);
    std::string cache_directory = fs_get_cache_directory();
    const bool success = fs_create_directory_with_parents(cache_directory);
    if (!success) {
        throw std::runtime_error("failed to create cache directory: " + cache_directory);
    }
    return cache_directory + filename;
}

//
// Model utils
//

struct common_init_result common_init_from_params(common_params & params) {
    common_init_result iparams;
    auto mparams = common_model_params_to_llama(params);

    llama_model * model = nullptr;

    if (!params.hf_repo.empty() && !params.hf_file.empty()) {
        model = common_load_model_from_hf(params.hf_repo, params.hf_file, params.model, params.hf_token, mparams);
    } else if (!params.model_url.empty()) {
        model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams);
    } else {
        model = llama_model_load_from_file(params.model.c_str(), mparams);
    }

    if (model == NULL) {
        LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.c_str());
        return iparams;
    }

    const llama_vocab * vocab = llama_model_get_vocab(model);

    if (params.reranking) {
        bool ok = true;

        if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) {
            LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__);
            ok = false;
        }

        if (llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) {
            LOG_WRN("%s: warning: vocab does not have an EOS token, reranking will not work\n", __func__);
            ok = false;
        }

        if (llama_vocab_sep(vocab) == LLAMA_TOKEN_NULL) {
            LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
            ok = false;
        }

        if (!ok) {
            llama_model_free(model);
            return iparams;
        }
    }

    auto cparams = common_context_params_to_llama(params);

    llama_context * lctx = llama_init_from_model(model, cparams);
    if (lctx == NULL) {
        LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.c_str());
        llama_model_free(model);
        return iparams;
    }

    if (params.ctx_shift && !llama_kv_cache_can_shift(lctx)) {
        LOG_WRN("%s: KV cache shifting is not supported for this model, disabling KV cache shifting\n", __func__);
        params.ctx_shift = false;
    }

    if (!params.control_vectors.empty()) {
        if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
        if (params.control_vector_layer_end   <= 0) params.control_vector_layer_end   = llama_model_n_layer(model);

        const auto cvec = common_control_vector_load(params.control_vectors);
        if (cvec.n_embd == -1) {
            llama_free(lctx);
            llama_model_free(model);

            return iparams;
        }

        int err = llama_apply_adapter_cvec(
                lctx,
                cvec.data.data(),
                cvec.data.size(),
                cvec.n_embd,
                params.control_vector_layer_start,
                params.control_vector_layer_end);
        if (err) {
            llama_free(lctx);
            llama_model_free(model);

            return iparams;
        }
    }

    // load and optionally apply lora adapters
    for (auto & la : params.lora_adapters) {
        llama_adapter_lora_ptr lora;
        lora.reset(llama_adapter_lora_init(model, la.path.c_str()));
        if (lora == nullptr) {
            LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
            llama_free(lctx);
            llama_model_free(model);
            return iparams;
        }

        la.ptr = lora.get();
        iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters
    }

    if (!params.lora_init_without_apply) {
        common_set_adapter_lora(lctx, params.lora_adapters);
    }

    if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) {
        LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__);
        params.sampling.ignore_eos = false;
    }

    if (params.sampling.ignore_eos) {
        for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) {
            if (llama_vocab_is_eog(vocab, i)) {
                LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY);
                params.sampling.logit_bias.push_back({i, -INFINITY});
            }
        }
    }

    if (params.sampling.penalty_last_n == -1) {
        LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
        params.sampling.penalty_last_n = llama_n_ctx(lctx);
    }

    if (params.sampling.dry_penalty_last_n == -1) {
        LOG_INF("%s: setting dry_penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
        params.sampling.dry_penalty_last_n = llama_n_ctx(lctx);
    }

    if (params.warmup) {
        LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__);

        std::vector<llama_token> tmp;
        llama_token bos = llama_vocab_bos(vocab);
        llama_token eos = llama_vocab_eos(vocab);

        // some models (e.g. T5) don't have a BOS token
        if (bos != LLAMA_TOKEN_NULL) {
            tmp.push_back(bos);
        }
        if (eos != LLAMA_TOKEN_NULL) {
            tmp.push_back(eos);
        }
        if (tmp.empty()) {
            tmp.push_back(0);
        }

        if (llama_model_has_encoder(model)) {
            llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
            llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
            if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
                decoder_start_token_id = bos;
            }
            tmp.clear();
            tmp.push_back(decoder_start_token_id);
        }
        if (llama_model_has_decoder(model)) {
            llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
        }
        llama_kv_cache_clear(lctx);
        llama_synchronize(lctx);
        llama_perf_context_reset(lctx);
    }

    iparams.model.reset(model);
    iparams.context.reset(lctx);

    return iparams;
}

void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
    llama_clear_adapter_lora(ctx);
    for (auto & la : lora) {
        if (la.scale != 0.0f) {
            llama_set_adapter_lora(ctx, la.ptr, la.scale);
        }
    }
}

struct llama_model_params common_model_params_to_llama(common_params & params) {
    auto mparams = llama_model_default_params();

    if (!params.devices.empty()) {
        mparams.devices = params.devices.data();
    }
    if (params.n_gpu_layers != -1) {
        mparams.n_gpu_layers = params.n_gpu_layers;
    }
    mparams.rpc_servers   = params.rpc_servers.c_str();
    mparams.main_gpu      = params.main_gpu;
    mparams.split_mode    = params.split_mode;
    mparams.tensor_split  = params.tensor_split;
    mparams.use_mmap      = params.use_mmap;
    mparams.use_mlock     = params.use_mlock;
    mparams.check_tensors = params.check_tensors;

    if (params.kv_overrides.empty()) {
        mparams.kv_overrides = NULL;
    } else {
        GGML_ASSERT(params.kv_overrides.back().key[0] == 0 && "KV overrides not terminated with empty key");
        mparams.kv_overrides = params.kv_overrides.data();
    }

    return mparams;
}

struct llama_context_params common_context_params_to_llama(const common_params & params) {
    auto cparams = llama_context_default_params();

    cparams.n_ctx             = params.n_ctx;
    cparams.n_seq_max         = params.n_parallel;
    cparams.n_batch           = params.n_batch;
    cparams.n_ubatch          = params.n_ubatch;
    cparams.n_threads         = params.cpuparams.n_threads;
    cparams.n_threads_batch   = params.cpuparams_batch.n_threads == -1 ?
                                params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
    cparams.logits_all        = params.logits_all;
    cparams.embeddings        = params.embedding;
    cparams.rope_scaling_type = params.rope_scaling_type;
    cparams.rope_freq_base    = params.rope_freq_base;
    cparams.rope_freq_scale   = params.rope_freq_scale;
    cparams.yarn_ext_factor   = params.yarn_ext_factor;
    cparams.yarn_attn_factor  = params.yarn_attn_factor;
    cparams.yarn_beta_fast    = params.yarn_beta_fast;
    cparams.yarn_beta_slow    = params.yarn_beta_slow;
    cparams.yarn_orig_ctx     = params.yarn_orig_ctx;
    cparams.pooling_type      = params.pooling_type;
    cparams.attention_type    = params.attention_type;
    cparams.defrag_thold      = params.defrag_thold;
    cparams.cb_eval           = params.cb_eval;
    cparams.cb_eval_user_data = params.cb_eval_user_data;
    cparams.offload_kqv       = !params.no_kv_offload;
    cparams.flash_attn        = params.flash_attn;
    cparams.no_perf           = params.no_perf;

    if (params.reranking) {
        cparams.embeddings   = true;
        cparams.pooling_type = LLAMA_POOLING_TYPE_RANK;
    }

    cparams.type_k = params.cache_type_k;
    cparams.type_v = params.cache_type_v;

    return cparams;
}

struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params) {
    struct ggml_threadpool_params tpp;

    ggml_threadpool_params_init(&tpp, params.n_threads); // setup the defaults

    if (params.mask_valid) {
        std::memcpy(&tpp.cpumask, &params.cpumask, GGML_MAX_N_THREADS);
    }

    tpp.prio       = params.priority;
    tpp.poll       = params.poll;
    tpp.strict_cpu = params.strict_cpu;

    return tpp;
}
#ifdef LLAMA_USE_CURL

#define CURL_MAX_RETRY 3
#define CURL_RETRY_DELAY_SECONDS 2

static bool curl_perform_with_retry(const std::string & url, CURL * curl, int max_attempts, int retry_delay_seconds) {
    int remaining_attempts = max_attempts;

    while (remaining_attempts > 0) {
        LOG_INF("%s: Trying to download from %s (attempt %d of %d)...\n", __func__ , url.c_str(), max_attempts - remaining_attempts + 1, max_attempts);

        CURLcode res = curl_easy_perform(curl);
        if (res == CURLE_OK) {
            return true;
        }

        int exponential_backoff_delay = std::pow(retry_delay_seconds, max_attempts - remaining_attempts) * 1000;
        LOG_WRN("%s: curl_easy_perform() failed: %s, retrying after %d milliseconds...\n", __func__, curl_easy_strerror(res), exponential_backoff_delay);

        remaining_attempts--;
        std::this_thread::sleep_for(std::chrono::milliseconds(exponential_backoff_delay));
    }

    LOG_ERR("%s: curl_easy_perform() failed after %d attempts\n", __func__, max_attempts);

    return false;
}
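
// Note: the backoff above grows as retry_delay_seconds^attempt, so with the
// defaults (base 2, 3 attempts) the waits after each failed attempt are
// 1, 2 and 4 seconds.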
static bool common_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
    // Initialize libcurl
    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
    curl_slist_ptr http_headers;
    if (!curl) {
        LOG_ERR("%s: error initializing libcurl\n", __func__);
        return false;
    }

    bool force_download = false;

    // Set the URL, allow to follow http redirection
    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);

    // Check if hf-token or bearer-token was specified
    if (!hf_token.empty()) {
        std::string auth_header = "Authorization: Bearer " + hf_token;
        http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
        curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);
    }

#if defined(_WIN32)
    // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
    // operating system. Currently implemented under MS-Windows.
    curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif

    // Check if the file already exists locally
    auto file_exists = std::filesystem::exists(path);

    // If the file exists, check its JSON metadata companion file.
    std::string metadata_path = path + ".json";
    nlohmann::json metadata;
    std::string etag;
    std::string last_modified;

    if (file_exists) {
        // Try and read the JSON metadata file (note: stream autoclosed upon exiting this block).
        std::ifstream metadata_in(metadata_path);
        if (metadata_in.good()) {
            try {
                metadata_in >> metadata;
                LOG_INF("%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(), metadata.dump().c_str());
                if (metadata.contains("url") && metadata.at("url").is_string()) {
                    auto previous_url = metadata.at("url").get<std::string>();
                    if (previous_url != url) {
                        LOG_ERR("%s: Model URL mismatch: %s != %s\n", __func__, url.c_str(), previous_url.c_str());
                        return false;
                    }
                }
                if (metadata.contains("etag") && metadata.at("etag").is_string()) {
                    etag = metadata.at("etag");
                }
                if (metadata.contains("lastModified") && metadata.at("lastModified").is_string()) {
                    last_modified = metadata.at("lastModified");
                }
            } catch (const nlohmann::json::exception & e) {
                LOG_ERR("%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what());
                return false;
            }
        }
    } else {
        LOG_INF("%s: no previous model file found %s\n", __func__, path.c_str());
    }

    // Send a HEAD request to retrieve the etag and last-modified headers
    struct common_load_model_from_url_headers {
        std::string etag;
        std::string last_modified;
    };

    common_load_model_from_url_headers headers;

    {
        typedef size_t(*CURLOPT_HEADERFUNCTION_PTR)(char *, size_t, size_t, void *);
        auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
            common_load_model_from_url_headers * headers = (common_load_model_from_url_headers *) userdata;

            static std::regex header_regex("([^:]+): (.*)\r\n");
            static std::regex etag_regex("ETag", std::regex_constants::icase);
            static std::regex last_modified_regex("Last-Modified", std::regex_constants::icase);

            std::string header(buffer, n_items);
            std::smatch match;
            if (std::regex_match(header, match, header_regex)) {
                const std::string & key = match[1];
                const std::string & value = match[2];
                if (std::regex_match(key, match, etag_regex)) {
                    headers->etag = value;
                } else if (std::regex_match(key, match, last_modified_regex)) {
                    headers->last_modified = value;
                }
            }
            return n_items;
        };

        curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 1L); // will trigger the HEAD verb
        curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); // hide head request progress
        curl_easy_setopt(curl.get(), CURLOPT_HEADERFUNCTION, static_cast<CURLOPT_HEADERFUNCTION_PTR>(header_callback));
        curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &headers);

        bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS);
        if (!was_perform_successful) {
            return false;
        }

        long http_code = 0;
        curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
        if (http_code != 200) {
            // HEAD not supported, we don't know if the file has changed
            // force trigger downloading
            force_download = true;
            LOG_ERR("%s: HEAD invalid http status code received: %ld\n", __func__, http_code);
        }
    }

    bool should_download = !file_exists || force_download;
    if (!should_download) {
        if (!etag.empty() && etag != headers.etag) {
            LOG_WRN("%s: ETag header is different (%s != %s): triggering a new download\n", __func__, etag.c_str(), headers.etag.c_str());
            should_download = true;
        } else if (!last_modified.empty() && last_modified != headers.last_modified) {
            LOG_WRN("%s: Last-Modified header is different (%s != %s): triggering a new download\n", __func__, last_modified.c_str(), headers.last_modified.c_str());
            should_download = true;
        }
    }
    if (should_download) {
        std::string path_temporary = path + ".downloadInProgress";
        if (file_exists) {
            LOG_WRN("%s: deleting previous downloaded file: %s\n", __func__, path.c_str());
            if (remove(path.c_str()) != 0) {
                LOG_ERR("%s: unable to delete file: %s\n", __func__, path.c_str());
                return false;
            }
        }

        // Set the output file
        struct FILE_deleter {
            void operator()(FILE * f) const {
                fclose(f);
            }
        };

        std::unique_ptr<FILE, FILE_deleter> outfile(fopen(path_temporary.c_str(), "wb"));
        if (!outfile) {
            LOG_ERR("%s: error opening local file for writing: %s\n", __func__, path.c_str());
            return false;
        }

        typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * data, size_t size, size_t nmemb, void * fd);
        auto write_callback = [](void * data, size_t size, size_t nmemb, void * fd) -> size_t {
            return fwrite(data, size, nmemb, (FILE *) fd);
        };
        curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 0L);
        curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
        curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, outfile.get());

        // display download progress
        curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 0L);

        // helper function to hide password in URL
        auto llama_download_hide_password_in_url = [](const std::string & url) -> std::string {
            std::size_t protocol_pos = url.find("://");
            if (protocol_pos == std::string::npos) {
                return url; // Malformed URL
            }

            std::size_t at_pos = url.find('@', protocol_pos + 3);
            if (at_pos == std::string::npos) {
                return url; // No password in URL
            }

            return url.substr(0, protocol_pos + 3) + "********" + url.substr(at_pos);
        };

        // start the download
        LOG_INF("%s: trying to download model from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
            llama_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str());
        bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS);
        if (!was_perform_successful) {
            return false;
        }

        long http_code = 0;
        curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
        if (http_code < 200 || http_code >= 400) {
            LOG_ERR("%s: invalid http status code received: %ld\n", __func__, http_code);
            return false;
        }

        // Causes file to be closed explicitly here before we rename it.
        outfile.reset();

        // Write the updated JSON metadata file.
        metadata.update({
            {"url", url},
            {"etag", headers.etag},
            {"lastModified", headers.last_modified}
        });
        std::ofstream(metadata_path) << metadata.dump(4);
        LOG_INF("%s: file metadata saved: %s\n", __func__, metadata_path.c_str());

        if (rename(path_temporary.c_str(), path.c_str()) != 0) {
            LOG_ERR("%s: unable to rename file: %s to %s\n", __func__, path_temporary.c_str(), path.c_str());
            return false;
        }
    }

    return true;
}
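
// Example (illustrative): a cached "model.gguf" is accompanied by a
// "model.gguf.json" sidecar holding {"url", "etag", "lastModified"}; the
// HEAD response is compared against these fields to decide whether a
// re-download is needed, and downloads go to "model.gguf.downloadInProgress"
// before being renamed into place.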
  1153. struct llama_model * common_load_model_from_url(
  1154. const std::string & model_url,
  1155. const std::string & local_path,
  1156. const std::string & hf_token,
  1157. const struct llama_model_params & params) {
  1158. // Basic validation of the model_url
  1159. if (model_url.empty()) {
  1160. LOG_ERR("%s: invalid model_url\n", __func__);
  1161. return NULL;
  1162. }
  1163. if (!common_download_file(model_url, local_path, hf_token)) {
  1164. return NULL;
  1165. }
  1166. // check for additional GGUFs split to download
  1167. int n_split = 0;
  1168. {
  1169. struct gguf_init_params gguf_params = {
  1170. /*.no_alloc = */ true,
  1171. /*.ctx = */ NULL,
  1172. };
  1173. auto * ctx_gguf = gguf_init_from_file(local_path.c_str(), gguf_params);
  1174. if (!ctx_gguf) {
  1175. LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, local_path.c_str());
  1176. return NULL;
  1177. }
  1178. auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_SPLIT_COUNT);
  1179. if (key_n_split >= 0) {
  1180. n_split = gguf_get_val_u16(ctx_gguf, key_n_split);
  1181. }
  1182. gguf_free(ctx_gguf);
  1183. }
  1184. if (n_split > 1) {
  1185. char split_prefix[PATH_MAX] = {0};
  1186. char split_url_prefix[LLAMA_CURL_MAX_URL_LENGTH] = {0};
  1187. // Verify the first split file format
  1188. // and extract split URL and PATH prefixes
  1189. {
  1190. if (!llama_split_prefix(split_prefix, sizeof(split_prefix), local_path.c_str(), 0, n_split)) {
  1191. LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, local_path.c_str(), n_split);
  1192. return NULL;
  1193. }
  1194. if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url.c_str(), 0, n_split)) {
  1195. LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url.c_str(), n_split);
  1196. return NULL;
  1197. }
  1198. }
  1199. // Prepare download in parallel
  1200. std::vector<std::future<bool>> futures_download;
  1201. for (int idx = 1; idx < n_split; idx++) {
  1202. futures_download.push_back(std::async(std::launch::async, [&split_prefix, &split_url_prefix, &n_split, hf_token](int download_idx) -> bool {
  1203. char split_path[PATH_MAX] = {0};
  1204. llama_split_path(split_path, sizeof(split_path), split_prefix, download_idx, n_split);
  1205. char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
  1206. llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);
  1207. return common_download_file(split_url, split_path, hf_token);
  1208. }, idx));
  1209. }
  1210. // Wait for all downloads to complete
  1211. for (auto & f : futures_download) {
  1212. if (!f.get()) {
  1213. return NULL;
  1214. }
  1215. }
  1216. }
  1217. return llama_model_load_from_file(local_path.c_str(), params);
  1218. }

struct llama_model * common_load_model_from_hf(
        const std::string & repo,
        const std::string & remote_path,
        const std::string & local_path,
        const std::string & hf_token,
        const struct llama_model_params & params) {
    // construct hugging face model url:
    //
    //  --repo ggml-org/models --file tinyllama-1.1b/ggml-model-f16.gguf
    //    https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf
    //
    //  --repo TheBloke/Mixtral-8x7B-v0.1-GGUF --file mixtral-8x7b-v0.1.Q4_K_M.gguf
    //    https://huggingface.co/TheBloke/Mixtral-8x7B-v0.1-GGUF/resolve/main/mixtral-8x7b-v0.1.Q4_K_M.gguf
    //

    std::string model_url = "https://huggingface.co/";
    model_url += repo;
    model_url += "/resolve/main/";
    model_url += remote_path;

    return common_load_model_from_url(model_url, local_path, hf_token, params);
}
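
// Hypothetical usage sketch (the repo/file names are just examples):
//
//     llama_model_params mparams = llama_model_default_params();
//     llama_model * model = common_load_model_from_hf(
//         "ggml-org/models", "tinyllama-1.1b/ggml-model-f16.gguf",
//         "/tmp/ggml-model-f16.gguf", /*hf_token=*/"", mparams);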

/**
 * Allow getting the HF file from the HF repo with tag (like ollama), for example:
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:q4
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M
 * - bartowski/Llama-3.2-3B-Instruct-GGUF:q5_k_s
 * The tag is optional and defaults to "latest" (meaning it checks for Q4_K_M first, then Q4, and if neither is found, returns the first GGUF file in the repo)
 *
 * Return pair of <repo, file> (with "repo" already having the tag removed)
 *
 * Note: we use the Ollama-compatible HF API, but we do not use the blobId. Instead, we use the special "ggufFile" field, which returns the value for "hf_file". This is done to stay backward-compatible with existing cache files.
 */
std::pair<std::string, std::string> common_get_hf_file(const std::string & hf_repo_with_tag, const std::string & hf_token) {
    auto parts = string_split<std::string>(hf_repo_with_tag, ':');
    std::string tag = parts.size() > 1 ? parts.back() : "latest";
    std::string hf_repo = parts[0];
    if (string_split<std::string>(hf_repo, '/').size() != 2) {
        throw std::invalid_argument("error: invalid HF repo format, expected <user>/<model>[:quant]\n");
    }

    // fetch model info from Hugging Face Hub API
    json model_info;
    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
    curl_slist_ptr http_headers;
    std::string res_str;
    std::string url = "https://huggingface.co/v2/" + hf_repo + "/manifests/" + tag;

    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L);
    typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data);
    auto write_callback = [](void * ptr, size_t size, size_t nmemb, void * data) -> size_t {
        static_cast<std::string *>(data)->append((char *) ptr, size * nmemb);
        return size * nmemb;
    };
    curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
    curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &res_str);

#if defined(_WIN32)
    curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif

    if (!hf_token.empty()) {
        std::string auth_header = "Authorization: Bearer " + hf_token;
        http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
    }
    // Important: the User-Agent must be "llama-cpp" to get the "ggufFile" field in the response
    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
    http_headers.ptr = curl_slist_append(http_headers.ptr, "Accept: application/json");
    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);

    CURLcode res = curl_easy_perform(curl.get());

    if (res != CURLE_OK) {
        throw std::runtime_error("error: cannot make GET request to HF API");
    }

    long res_code;
    curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &res_code);
    if (res_code == 200) {
        model_info = json::parse(res_str);
    } else if (res_code == 401) {
        throw std::runtime_error("error: model is private or does not exist; if you are accessing a gated model, please provide a valid HF token");
    } else {
        throw std::runtime_error(string_format("error from HF API, response code: %ld, data: %s", res_code, res_str.c_str()));
    }

    // check response
    if (!model_info.contains("ggufFile")) {
        throw std::runtime_error("error: model does not have ggufFile");
    }
    json & gguf_file = model_info.at("ggufFile");
    if (!gguf_file.contains("rfilename")) {
        throw std::runtime_error("error: ggufFile does not have rfilename");
    }

    return std::make_pair(hf_repo, gguf_file.at("rfilename"));
}

#else

struct llama_model * common_load_model_from_url(
        const std::string & /*model_url*/,
        const std::string & /*local_path*/,
        const std::string & /*hf_token*/,
        const struct llama_model_params & /*params*/) {
    LOG_WRN("%s: llama.cpp built without libcurl, downloading from a URL is not supported.\n", __func__);
    return nullptr;
}

struct llama_model * common_load_model_from_hf(
        const std::string & /*repo*/,
        const std::string & /*remote_path*/,
        const std::string & /*local_path*/,
        const std::string & /*hf_token*/,
        const struct llama_model_params & /*params*/) {
    LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face is not supported.\n", __func__);
    return nullptr;
}

std::pair<std::string, std::string> common_get_hf_file(const std::string &, const std::string &) {
    LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face is not supported.\n", __func__);
    return std::make_pair("", "");
}

#endif // LLAMA_USE_CURL

//
// Batch utils
//

void common_batch_clear(struct llama_batch & batch) {
    batch.n_tokens = 0;
}
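
// Append one token to the batch at position pos, assigning it to every sequence
// in seq_ids; logits == true requests output logits for this token.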
void common_batch_add(
        struct llama_batch & batch,
        llama_token id,
        llama_pos pos,
        const std::vector<llama_seq_id> & seq_ids,
        bool logits) {
    GGML_ASSERT(batch.seq_id[batch.n_tokens] && "llama_batch size exceeded");

    batch.token   [batch.n_tokens] = id;
    batch.pos     [batch.n_tokens] = pos;
    batch.n_seq_id[batch.n_tokens] = seq_ids.size();
    for (size_t i = 0; i < seq_ids.size(); ++i) {
        batch.seq_id[batch.n_tokens][i] = seq_ids[i];
    }
    batch.logits  [batch.n_tokens] = logits;

    batch.n_tokens++;
}
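
// Hypothetical usage sketch: feed a prompt (prompt_tokens is assumed) as a
// single sequence, requesting logits only for the last token.
//
//     llama_batch batch = llama_batch_init(512, 0, 1);
//     common_batch_clear(batch);
//     for (size_t i = 0; i < prompt_tokens.size(); i++) {
//         common_batch_add(batch, prompt_tokens[i], (llama_pos) i, { 0 }, i + 1 == prompt_tokens.size());
//     }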

//
// Token utils
//
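
// Length of the longest common prefix of two token sequences.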
size_t common_lcp(const llama_tokens & a, const llama_tokens & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

size_t common_lcs(const llama_tokens & a, const llama_tokens & b) {
    // check for empty sequences
    if (a.empty() || b.empty()) {
        return 0;
    }

    // get the lengths of the input sequences
    size_t a_len = a.size();
    size_t b_len = b.size();

    // initialize the maximum length of the longest common substring (LCS)
    // (note: this computes the longest common *substring*, i.e. a contiguous
    //  run of matching tokens, not the classic longest common subsequence)
    size_t max_length = 0;

    // use two rows instead of a 2D matrix to optimize space
    std::vector<size_t> prev_row(b_len + 1, 0);
    std::vector<size_t> curr_row(b_len + 1, 0);

    // iterate through the elements of a
    for (size_t i = 1; i <= a_len; i++) {
        // iterate through the elements of b
        for (size_t j = 1; j <= b_len; j++) {
            // if elements at the current positions match
            if (a[i - 1] == b[j - 1]) {
                // if it's the first element of either sequence, set the run length to 1
                if (i == 1 || j == 1) {
                    curr_row[j] = 1;
                } else {
                    // extend the run by 1 compared to the previous diagonal element
                    curr_row[j] = prev_row[j - 1] + 1;
                }

                // update max_length if necessary
                if (curr_row[j] > max_length) {
                    max_length = curr_row[j];
                }
            } else {
                // reset the run length if elements don't match
                curr_row[j] = 0;
            }
        }

        // update the previous row for the next iteration
        prev_row = curr_row;
    }

    // return the maximum length of the LCS
    return max_length;
}

//
// Vocab utils
//
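
// The tokenize/detokenize helpers below share a two-pass pattern: call the
// llama.cpp API once with a guessed buffer size; on a negative return value
// (minus the required size), resize the buffer and call again.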
std::vector<llama_token> common_tokenize(
        const struct llama_context * ctx,
        const std::string & text,
        bool add_special,
        bool parse_special) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);
    return common_tokenize(vocab, text, add_special, parse_special);
}

std::vector<llama_token> common_tokenize(
        const struct llama_vocab * vocab,
        const std::string & text,
        bool add_special,
        bool parse_special) {
    // upper limit for the number of tokens
    int n_tokens = text.length() + 2 * add_special;
    std::vector<llama_token> result(n_tokens);
    n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }
    return result;
}

std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);
    return common_token_to_piece(vocab, token, special);
}

std::string common_token_to_piece(const struct llama_vocab * vocab, llama_token token, bool special) {
    std::string piece;
    piece.resize(piece.capacity());  // using string internal cache, 15 bytes + '\0'
    const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
    if (n_chars < 0) {
        piece.resize(-n_chars);
        int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
        GGML_ASSERT(check == -n_chars);
    } else {
        piece.resize(n_chars);
    }

    return piece;
}

std::string common_detokenize(const struct llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);
    return common_detokenize(vocab, tokens, special);
}

std::string common_detokenize(const struct llama_vocab * vocab, const std::vector<llama_token> & tokens, bool special) {
    std::string text;
    text.resize(std::max(text.capacity(), tokens.size()));
    int32_t n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
    if (n_chars < 0) {
        text.resize(-n_chars);
        n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
        GGML_ASSERT(n_chars <= (int32_t)text.size());  // whitespace trimming is performed after per-token detokenization
    }

    text.resize(n_chars);

    // NOTE: the original tokenizer decodes bytes after collecting the pieces.
    return text;
}

//
// Chat template utils
//
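
// Return the chat template embedded in the model's GGUF metadata, or an empty
// string if the model does not ship one.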
std::string common_get_builtin_chat_template(const struct llama_model * model) {
    const char * ptr_tmpl = llama_model_chat_template(model);
    return ptr_tmpl == nullptr ? "" : ptr_tmpl;
}

bool common_chat_verify_template(const std::string & tmpl) {
    llama_chat_message chat[] = {{"user", "test"}};
    const int res = llama_chat_apply_template(tmpl.c_str(), chat, 1, true, nullptr, 0);
    return res >= 0;
}

std::string common_chat_apply_template(const struct llama_model * model,
        const std::string & tmpl,
        const std::vector<common_chat_msg> & msgs,
        bool add_ass) {
    int alloc_size = 0;
    bool fallback = false;  // indicates whether we must fall back to the default chatml template
    std::vector<llama_chat_message> chat;
    for (const auto & msg : msgs) {
        chat.push_back({msg.role.c_str(), msg.content.c_str()});
        alloc_size += (msg.role.size() + msg.content.size()) * 1.25;
    }

    const char * ptr_tmpl = tmpl.empty() ? llama_model_chat_template(model) : tmpl.c_str();
    std::vector<char> buf(alloc_size);

    // run the first time to get the total output length
    int32_t res = llama_chat_apply_template(ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size());

    // error: chat template is not supported
    if (res < 0) {
        if (ptr_tmpl != nullptr) {
            // if the custom "tmpl" is not supported, we throw an error
            // this is intentionally redundant, since we cannot be sure the user validated the custom template with llama_chat_verify_template()
            throw std::runtime_error("this custom template is not supported");
        }

        // if the built-in template is not supported, we default to chatml
        res = llama_chat_apply_template("chatml", chat.data(), chat.size(), add_ass, buf.data(), buf.size());
        fallback = true;
    }

    // if it turns out that our buffer is too small, we resize it
    if ((size_t) res > buf.size()) {
        buf.resize(res);
        res = llama_chat_apply_template(
            fallback ? "chatml" : ptr_tmpl,
            chat.data(), chat.size(), add_ass, buf.data(), buf.size());
    }

    std::string formatted_chat(buf.data(), res);
    return formatted_chat;
}
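
// Format only the new message: apply the template to past_msg and to
// past_msg + new_msg, and return the suffix that the new message adds.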
std::string common_chat_format_single(const struct llama_model * model,
        const std::string & tmpl,
        const std::vector<common_chat_msg> & past_msg,
        const common_chat_msg & new_msg,
        bool add_ass) {
    std::ostringstream ss;
    auto fmt_past_msg = past_msg.empty() ? "" : common_chat_apply_template(model, tmpl, past_msg, false);
    std::vector<common_chat_msg> chat_new(past_msg);
    // if the past_msg ends with a newline, we must preserve it in the formatted version
    if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
        ss << "\n";
    }
    // format chat with new_msg
    chat_new.push_back(new_msg);
    auto fmt_new_msg = common_chat_apply_template(model, tmpl, chat_new, add_ass);
    // get the diff part
    ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return ss.str();
}

std::string common_chat_format_example(const struct llama_model * model,
        const std::string & tmpl) {
    std::vector<common_chat_msg> msgs = {
        {"system",    "You are a helpful assistant"},
        {"user",      "Hello"},
        {"assistant", "Hi there"},
        {"user",      "How are you?"},
    };
    return common_chat_apply_template(model, tmpl, msgs, true);
}

//
// KV cache utils
//
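
// Print a compact map of the KV cache: one character per cell, '.' for an empty
// cell, otherwise a symbol encoding how many sequences share the cell.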
void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
    static const char slot_chars[] = ".123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+";

    printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d",
        view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx);

    llama_kv_cache_view_cell * c_curr = view.cells;
    llama_seq_id * cs_curr = view.cells_sequences;

    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
        if (i % row_size == 0) {
            printf("\n%5d: ", i);
        }
        int seq_count = 0;
        for (int j = 0; j < view.n_seq_max; j++) {
            if (cs_curr[j] >= 0) { seq_count++; }
        }
        putchar(slot_chars[std::min(sizeof(slot_chars) - 2, size_t(seq_count))]);
    }

    printf("\n=== Done dumping\n");
}

void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) {
    static const char slot_chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

    printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d\n",
        view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx);

    std::unordered_map<llama_seq_id, size_t> seqs;
    llama_kv_cache_view_cell * c_curr = view.cells;
    llama_seq_id * cs_curr = view.cells_sequences;

    // assign a legend symbol to each distinct sequence id, up to the size of slot_chars
    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
        for (int j = 0; j < view.n_seq_max; j++) {
            if (cs_curr[j] < 0) { continue; }
            if (seqs.find(cs_curr[j]) == seqs.end()) {
                if (seqs.size() + 1 >= sizeof(slot_chars)) { break; }
                const size_t sz = seqs.size();
                seqs[cs_curr[j]] = sz;
            }
        }
        if (seqs.size() + 1 >= sizeof(slot_chars)) { break; }
    }

    printf("=== Sequence legend: ");
    for (const auto & it : seqs) {
        printf("%zu=%d, ", it.second, it.first);
    }
    printf("'+'=other sequence ids");

    c_curr = view.cells;
    cs_curr = view.cells_sequences;
    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
        if (i % row_size == 0) {
            printf("\n%5d: ", i);
        }
        for (int j = 0; j < view.n_seq_max; j++) {
            if (cs_curr[j] >= 0) {
                const auto & it = seqs.find(cs_curr[j]);
                putchar(it != seqs.end() ? int(slot_chars[it->second]) : '+');
            } else {
                putchar('.');
            }
        }
        putchar(' ');
    }

    printf("\n=== Done dumping\n");
}

//
// Embedding utils
//
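
// Normalize n embedding values from inp into out according to embd_norm:
//   -1 : no normalisation
//    0 : divide by the maximum absolute value, scaled to an int16-style range (+/-32760)
//    2 : euclidean (L2) norm
//   else: general p-norm with p = embd_norm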
void common_embd_normalize(const float * inp, float * out, int n, int embd_norm) {
    double sum = 0.0;

    switch (embd_norm) {
        case -1: // no normalisation
            sum = 1.0;
            break;
        case 0: // max absolute
            for (int i = 0; i < n; i++) {
                if (sum < std::abs(inp[i])) {
                    sum = std::abs(inp[i]);
                }
            }
            sum /= 32760.0; // make an int16 range
            break;
        case 2: // euclidean
            for (int i = 0; i < n; i++) {
                sum += inp[i] * inp[i];
            }
            sum = std::sqrt(sum);
            break;
        default: // p-norm (euclidean is p-norm p=2)
            for (int i = 0; i < n; i++) {
                sum += std::pow(std::abs(inp[i]), embd_norm);
            }
            sum = std::pow(sum, 1.0 / embd_norm);
            break;
    }

    const float norm = sum > 0.0 ? 1.0 / sum : 0.0f;

    for (int i = 0; i < n; i++) {
        out[i] = inp[i] * norm;
    }
}

float common_embd_similarity_cos(const float * embd1, const float * embd2, int n) {
    double sum  = 0.0;
    double sum1 = 0.0;
    double sum2 = 0.0;

    for (int i = 0; i < n; i++) {
        sum  += embd1[i] * embd2[i];
        sum1 += embd1[i] * embd1[i];
        sum2 += embd2[i] * embd2[i];
    }

    // Handle the case where one or both vectors are zero vectors
    if (sum1 == 0.0 || sum2 == 0.0) {
        if (sum1 == 0.0 && sum2 == 0.0) {
            return 1.0f; // two zero vectors are similar
        }
        return 0.0f;
    }

    return sum / (sqrt(sum1) * sqrt(sum2));
}

//
// Control vector utils
//
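
// Load a single control vector GGUF file: each tensor must be a 1D F32 tensor
// named "direction.<layer>" with layer >= 1, and all tensors must have the same
// number of elements (n_embd). Directions are scaled by load_info.strength and
// accumulated into a flat per-layer buffer (layer 1 at offset 0).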
static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
    common_control_vector_data result = { -1, {} };

    ggml_context * ctx = nullptr;
    struct gguf_init_params meta_gguf_params = {
        /* .no_alloc = */ false,
        /* .ctx      = */ &ctx,
    };
    struct gguf_context * ctx_gguf = gguf_init_from_file(load_info.fname.c_str(), meta_gguf_params);
    if (!ctx_gguf) {
        LOG_ERR("%s: failed to load control vector file from %s\n", __func__, load_info.fname.c_str());
        return result;
    }

    int32_t n_tensors = gguf_get_n_tensors(ctx_gguf);
    if (n_tensors == 0) {
        LOG_WRN("%s: no direction tensors found in %s\n", __func__, load_info.fname.c_str());
    }

    for (int i = 0; i < n_tensors; i++) {
        std::string name = gguf_get_tensor_name(ctx_gguf, i);

        int layer_idx = -1;

        // split on '.'
        size_t dotpos = name.find('.');
        if (dotpos != std::string::npos && name.substr(0, dotpos) == "direction") {
            try {
                layer_idx = std::stoi(name.substr(dotpos + 1));
            } catch (...) {
                layer_idx = -1;
            }
        }
        if (layer_idx < 0) {
            LOG_ERR("%s: invalid/unparsable direction tensor layer index in %s\n", __func__, load_info.fname.c_str());
            result.n_embd = -1;
            break;
        } else if (layer_idx == 0) {
            LOG_ERR("%s: invalid (zero) direction tensor layer index in %s\n", __func__, load_info.fname.c_str());
            result.n_embd = -1;
            break;
        }

        struct ggml_tensor * tensor = ggml_get_tensor(ctx, name.c_str());
        if (tensor->type != GGML_TYPE_F32) {
            LOG_ERR("%s: invalid (non-F32) direction tensor type in %s\n", __func__, load_info.fname.c_str());
            result.n_embd = -1;
            break;
        }
        if (ggml_n_dims(tensor) != 1) {
            LOG_ERR("%s: invalid (non-1D) direction tensor shape in %s\n", __func__, load_info.fname.c_str());
            result.n_embd = -1;
            break;
        }

        if (result.n_embd == -1) {
            result.n_embd = ggml_nelements(tensor);
        } else if (ggml_nelements(tensor) != result.n_embd) {
            LOG_ERR("%s: direction tensor in %s does not match previous dimensions\n", __func__, load_info.fname.c_str());
            result.n_embd = -1;
            break;
        }

        // extend if necessary - do not store data for layer 0 (it's not used)
        result.data.resize(std::max(result.data.size(), static_cast<size_t>(result.n_embd * layer_idx)), 0.0f);

        const float * src = (const float *) tensor->data;
        float * dst = result.data.data() + result.n_embd * (layer_idx - 1);  // layer 1 at [0]
        for (int j = 0; j < result.n_embd; j++) {
            dst[j] += src[j] * load_info.strength;  // allows multiple directions for same layer in same file
        }
    }

    if (result.n_embd == -1) {
        LOG_WRN("%s: skipping %s due to invalid direction tensors\n", __func__, load_info.fname.c_str());
        result.data.clear();
    }

    gguf_free(ctx_gguf);
    ggml_free(ctx);

    return result;
}

common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
    common_control_vector_data result = { -1, {} };

    for (const auto & info : load_infos) {
        auto cur = common_control_vector_load_one(info);

        if (cur.n_embd == -1) {
            result.n_embd = -1;
            break;
        }
        if (result.n_embd != -1 && result.n_embd != cur.n_embd) {
            LOG_ERR("%s: control vectors in %s do not match previous dimensions\n", __func__, info.fname.c_str());
            result.n_embd = -1;
            break;
        }

        if (result.n_embd == -1) {
            result = std::move(cur);
        } else {
            result.data.resize(std::max(result.data.size(), cur.data.size()), 0.0f);  // extend if necessary
            for (size_t i = 0; i < cur.data.size(); i++) {
                result.data[i] += cur.data[i];
            }
        }
    }

    if (result.n_embd == -1) {
        LOG_ERR("%s: no valid control vector files passed\n", __func__);
        result.data.clear();
    }

    return result;
}