// llama-util.h
  1. // Internal header to be included only by llama.cpp.
  2. // Contains wrappers around OS interfaces.
  3. #ifndef LLAMA_UTIL_H
  4. #define LLAMA_UTIL_H
#include <cerrno>
#include <climits>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <stdexcept>
#include <string>
#include <vector>
  15. #ifdef __has_include
  16. #if __has_include(<unistd.h>)
  17. #include <unistd.h>
  18. #if defined(_POSIX_MAPPED_FILES)
  19. #include <sys/mman.h>
  20. #endif
  21. #if defined(_POSIX_MEMLOCK_RANGE)
  22. #include <sys/resource.h>
  23. #endif
  24. #endif
  25. #endif
  26. #if defined(_WIN32)
  27. #define WIN32_LEAN_AND_MEAN
  28. #ifndef NOMINMAX
  29. #define NOMINMAX
  30. #endif
  31. #include <windows.h>
  32. #include <io.h>
  33. #include <stdio.h> // for _fseeki64
  34. #endif
  35. #define LLAMA_ASSERT(x) \
  36. do { \
  37. if (!(x)) { \
  38. fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
  39. abort(); \
  40. } \
  41. } while (0)
  42. #ifdef __GNUC__
  43. #ifdef __MINGW32__
  44. __attribute__((format(gnu_printf, 1, 2)))
  45. #else
  46. __attribute__((format(printf, 1, 2)))
  47. #endif
  48. #endif
  49. static std::string format(const char * fmt, ...) {
  50. va_list ap, ap2;
  51. va_start(ap, fmt);
  52. va_copy(ap2, ap);
  53. int size = vsnprintf(NULL, 0, fmt, ap);
  54. LLAMA_ASSERT(size >= 0 && size < INT_MAX);
  55. std::vector<char> buf(size + 1);
  56. int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
  57. LLAMA_ASSERT(size2 == size);
  58. va_end(ap2);
  59. va_end(ap);
  60. return std::string(buf.data(), size);
  61. }
// RAII wrapper around a stdio FILE handle with checked, throwing I/O helpers.
// All read/write helpers either complete fully or throw std::runtime_error.
struct llama_file {
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    // total file size in bytes, measured once at construction
    size_t size;

    // Opens `fname` with the given fopen() mode and records the file size
    // (seek to end, tell, seek back). Throws std::runtime_error on failure.
    llama_file(const char * fname, const char * mode) {
        fp = std::fopen(fname, mode);
        if (fp == NULL) {
            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
        }
        seek(0, SEEK_END);
        size = tell();
        seek(0, SEEK_SET);
    }

    // Returns the current file offset. Uses the 64-bit _ftelli64 on Windows
    // (plain ftell is 32-bit there and would overflow on large files).
    size_t tell() const {
#ifdef _WIN32
        __int64 ret = _ftelli64(fp);
#else
        long ret = std::ftell(fp);
#endif
        LLAMA_ASSERT(ret != -1); // this really shouldn't fail
        return (size_t) ret;
    }

    // Moves the file offset; `whence` is SEEK_SET/SEEK_CUR/SEEK_END.
    void seek(size_t offset, int whence) {
#ifdef _WIN32
        int ret = _fseeki64(fp, (__int64) offset, whence);
#else
        int ret = std::fseek(fp, (long) offset, whence);
#endif
        LLAMA_ASSERT(ret == 0); // same
    }

    // Reads exactly `len` bytes into `ptr`. Throws on I/O error or on a
    // short read (premature EOF). No-op when len == 0.
    void read_raw(void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        // one element of `len` bytes: ret == 1 iff the full read succeeded
        std::size_t ret = std::fread(ptr, len, 1, fp);
        if (ferror(fp)) {
            throw std::runtime_error(format("read error: %s", strerror(errno)));
        }
        if (ret != 1) {
            throw std::runtime_error(std::string("unexpectedly reached end of file"));
        }
    }

    // Reads 4 raw bytes and returns them as a uint32_t (host byte order).
    std::uint32_t read_u32() {
        std::uint32_t ret;
        read_raw(&ret, sizeof(ret));
        return ret;
    }

    // Reads `len` raw bytes and returns them as a std::string
    // (the result may contain embedded NUL bytes).
    std::string read_string(std::uint32_t len) {
        std::vector<char> chars(len);
        read_raw(chars.data(), len);
        return std::string(chars.data(), len);
    }

    // Writes exactly `len` bytes from `ptr`; throws on a short write.
    // No-op when len == 0.
    void write_raw(const void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        size_t ret = std::fwrite(ptr, len, 1, fp);
        if (ret != 1) {
            throw std::runtime_error(format("write error: %s", strerror(errno)));
        }
    }

    // Writes a uint32_t as 4 raw bytes (host byte order).
    void write_u32(std::uint32_t val) {
        write_raw(&val, sizeof(val));
    }

    ~llama_file() {
        if (fp) {
            std::fclose(fp);
        }
    }
};
// llama_context_data
// Abstract byte sink used when serializing context state: implementations
// write into a memory buffer or a file while tracking total bytes written.
struct llama_data_context {
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~llama_data_context() = default;
};
  140. struct llama_data_buffer_context : llama_data_context {
  141. uint8_t* ptr;
  142. size_t size_written = 0;
  143. llama_data_buffer_context(uint8_t * p) : ptr(p) {}
  144. void write(const void * src, size_t size) override {
  145. memcpy(ptr, src, size);
  146. ptr += size;
  147. size_written += size;
  148. }
  149. size_t get_size_written() override {
  150. return size_written;
  151. }
  152. };
  153. struct llama_data_file_context : llama_data_context {
  154. llama_file* file;
  155. size_t size_written = 0;
  156. llama_data_file_context(llama_file * f) : file(f) {}
  157. void write(const void * src, size_t size) override {
  158. file->write_raw(src, size);
  159. size_written += size;
  160. }
  161. size_t get_size_written() override {
  162. return size_written;
  163. }
  164. };
#if defined(_WIN32)
// Converts a Windows error code (e.g. from GetLastError()) into a
// human-readable message string via FormatMessageA.
static std::string llama_format_win_err(DWORD err) {
    LPSTR buf;
    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
    if (!size) {
        return "FormatMessageA failed";
    }
    std::string ret(buf, size);
    // buf was allocated by FormatMessageA (FORMAT_MESSAGE_ALLOCATE_BUFFER),
    // so it must be released with LocalFree
    LocalFree(buf);
    return ret;
}
#endif
// Memory-maps a llama_file read-only for zero-copy access to its contents;
// the mapping is released automatically on destruction.
struct llama_mmap {
    void * addr;
    size_t size;

    llama_mmap(const llama_mmap &) = delete;

#ifdef _POSIX_MAPPED_FILES
    static constexpr bool SUPPORTED = true;

    // prefetch: number of bytes to ask the kernel to preload (-1 = whole file);
    // numa:     disable prefetch/readahead entirely (see comments below).
    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
        size = file->size;
        int fd = fileno(file->fp);
        int flags = MAP_SHARED;
        // prefetch/readahead impairs performance on NUMA systems
        if (numa) { prefetch = 0; }
#ifdef __linux__
        // populate page tables eagerly when the whole file is requested
        if (prefetch >= file->size) { flags |= MAP_POPULATE; }
#endif
        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
        if (addr == MAP_FAILED) {
            throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
        }
        if (prefetch > 0) {
            // Advise the kernel to preload the mapped memory
            if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) {
                fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
                        strerror(errno));
            }
        }
        if (numa) {
            // advise the kernel not to use readahead
            // (because the next page might not belong on the same node)
            if (madvise(addr, file->size, MADV_RANDOM)) {
                fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n",
                        strerror(errno));
            }
        }
    }

    ~llama_mmap() {
        munmap(addr, size);
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
        (void) numa; // NUMA hinting not implemented on Windows

        size = file->size;

        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));

        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
        DWORD error = GetLastError();

        if (hMapping == NULL) {
            throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
        }

        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
        error = GetLastError();
        // the mapped view keeps the section alive, so the handle can go now
        CloseHandle(hMapping);

        if (addr == NULL) {
            throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
        }

        if (prefetch) {
            // The PrefetchVirtualMemory API is only present on Windows 8 and above, so we
            // will dynamically load it using GetProcAddress.
            BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
            HMODULE hKernel32;

            // This call is guaranteed to succeed.
            hKernel32 = GetModuleHandleW(L"kernel32.dll");

            // This call may fail if on a pre-Win8 system.
            pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));

            if (pPrefetchVirtualMemory) {
                // Advise the kernel to preload the mapped memory.
                WIN32_MEMORY_RANGE_ENTRY range;
                range.VirtualAddress = addr;
                range.NumberOfBytes = (SIZE_T)size;
                if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                    fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
                            llama_format_win_err(GetLastError()).c_str());
                }
            }
        }
    }

    ~llama_mmap() {
        if (!UnmapViewOfFile(addr)) {
            fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    // No mmap facility on this platform: constructing always throws.
    static constexpr bool SUPPORTED = false;

    llama_mmap(struct llama_file *, bool prefetch = true, bool numa = false) {
        (void) prefetch;
        (void) numa;

        throw std::runtime_error(std::string("mmap not supported"));
    }
#endif
};
// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
    void * addr = NULL;       // base address of the locked region
    size_t size = 0;          // number of bytes currently locked
    bool failed_already = false; // once a lock fails, stop retrying on grow_to()

    llama_mlock() {}
    llama_mlock(const llama_mlock &) = delete;

    ~llama_mlock() {
        if (size) {
            raw_unlock(addr, size);
        }
    }

    // Sets the base address. Must be called exactly once, before grow_to().
    void init(void * ptr) {
        LLAMA_ASSERT(addr == NULL && size == 0);
        addr = ptr;
    }

    // Extends the locked region to cover at least `target_size` bytes from
    // the base address. Rounds up to the lock granularity (page size); a
    // failure is remembered so subsequent calls become no-ops.
    void grow_to(size_t target_size) {
        LLAMA_ASSERT(addr);
        if (failed_already) {
            return;
        }
        size_t granularity = lock_granularity();
        // round up to a multiple of granularity (assumes a power of two)
        target_size = (target_size + granularity - 1) & ~(granularity - 1);
        if (target_size > size) {
            // only lock the newly-grown tail, not the already-locked prefix
            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
                size = target_size;
            } else {
                failed_already = true;
            }
        }
    }

#ifdef _POSIX_MEMLOCK_RANGE
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        return (size_t) sysconf(_SC_PAGESIZE);
    }

#ifdef __APPLE__
#define MLOCK_SUGGESTION \
    "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
    "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
    "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif

    // Attempts to mlock the range; on failure prints a warning (with a
    // platform-specific hint when the RLIMIT_MEMLOCK cap looks like the
    // cause) and returns false.
    bool raw_lock(const void * addr, size_t size) {
        if (!mlock(addr, size)) {
            return true;
        } else {
            char* errmsg = std::strerror(errno);
            bool suggest = (errno == ENOMEM);

            // Check if the resource limit is fine after all
            struct rlimit lock_limit;
            if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
                suggest = false;
            if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
                suggest = false;

            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                    size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
            return false;
        }
    }

#undef MLOCK_SUGGESTION

    void raw_unlock(void * addr, size_t size) {
        if (munlock(addr, size)) {
            fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        return (size_t) si.dwPageSize;
    }

    // Attempts to VirtualLock the range. On the first failure it grows the
    // process working-set quota and retries once; a second failure warns
    // and returns false.
    bool raw_lock(void * ptr, size_t len) {
        for (int tries = 1; ; tries++) {
            if (VirtualLock(ptr, len)) {
                return true;
            }
            if (tries == 2) {
                fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                        len, size, llama_format_win_err(GetLastError()).c_str());
                return false;
            }

            // It failed but this was only the first try; increase the working
            // set size and try again.
            SIZE_T min_ws_size, max_ws_size;
            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
                fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // Per MSDN: "The maximum number of pages that a process can lock
            // is equal to the number of pages in its minimum working set minus
            // a small overhead."
            // Hopefully a megabyte is enough overhead:
            size_t increment = len + 1048576;
            // The minimum must be <= the maximum, so we need to increase both:
            min_ws_size += increment;
            max_ws_size += increment;
            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
                fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
        }
    }

    void raw_unlock(void * ptr, size_t len) {
        if (!VirtualUnlock(ptr, len)) {
            fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    // No memory-locking facility on this platform: locking always fails
    // (with a warning) and unlocking is a no-op.
    static constexpr bool SUPPORTED = false;

    size_t lock_granularity() {
        return (size_t) 65536;
    }

    bool raw_lock(const void * addr, size_t len) {
        fprintf(stderr, "warning: mlock not supported on this system\n");
        return false;
    }

    void raw_unlock(const void * addr, size_t len) {}
#endif
};
  395. // Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
  396. struct llama_buffer {
  397. uint8_t * addr = NULL;
  398. size_t size = 0;
  399. llama_buffer() = default;
  400. void resize(size_t len) {
  401. #ifdef GGML_USE_METAL
  402. free(addr);
  403. int result = posix_memalign((void **) &addr, getpagesize(), len);
  404. if (result == 0) {
  405. memset(addr, 0, len);
  406. }
  407. else {
  408. addr = NULL;
  409. }
  410. #else
  411. delete[] addr;
  412. addr = new uint8_t[len];
  413. #endif
  414. size = len;
  415. }
  416. ~llama_buffer() {
  417. #ifdef GGML_USE_METAL
  418. free(addr);
  419. #else
  420. delete[] addr;
  421. #endif
  422. addr = NULL;
  423. }
  424. // disable copy and move
  425. llama_buffer(const llama_buffer&) = delete;
  426. llama_buffer(llama_buffer&&) = delete;
  427. llama_buffer& operator=(const llama_buffer&) = delete;
  428. llama_buffer& operator=(llama_buffer&&) = delete;
  429. };
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
// Host buffer for CUDA builds: prefers pinned (page-locked) memory from
// ggml_cuda_host_malloc and falls back to ordinary heap memory when the
// pinned allocation fails. Non-copyable and non-movable.
struct llama_ctx_buffer {
    uint8_t * addr = NULL;
    // true when addr came from ggml_cuda_host_malloc (determines how to free);
    // only meaningful while addr != NULL
    bool is_cuda;
    size_t size = 0;

    llama_ctx_buffer() = default;

    // Frees any current buffer and allocates `size` bytes, pinned if possible.
    void resize(size_t size) {
        free();

        addr = (uint8_t *) ggml_cuda_host_malloc(size);
        if (addr) {
            is_cuda = true;
        }
        else {
            // fall back to pageable memory
            addr = new uint8_t[size];
            is_cuda = false;
        }
        this->size = size;
    }

    // Releases the buffer with the deallocator matching its origin.
    void free() {
        if (addr) {
            if (is_cuda) {
                ggml_cuda_host_free(addr);
            }
            else {
                delete[] addr;
            }
        }
        addr = NULL;
    }

    ~llama_ctx_buffer() {
        free();
    }

    // disable copy and move
    llama_ctx_buffer(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer(llama_ctx_buffer&&) = delete;
    llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete;
};
#else
// Without CUDA, the context buffer is just the plain host buffer.
typedef llama_buffer llama_ctx_buffer;
#endif

#endif // LLAMA_UTIL_H