// llama-util.h
  1. // Internal header to be included only by llama.cpp.
  2. // Contains wrappers around OS interfaces.
  3. #ifndef LLAMA_UTIL_H
  4. #define LLAMA_UTIL_H
  5. #include <cstdio>
  6. #include <cstdint>
  7. #include <cerrno>
  8. #include <cstring>
  9. #include <cstdarg>
  10. #include <cstdlib>
  11. #include <climits>
  12. #include <string>
  13. #include <vector>
  14. #ifdef __has_include
  15. #if __has_include(<unistd.h>)
  16. #include <unistd.h>
  17. #if defined(_POSIX_MAPPED_FILES)
  18. #include <sys/mman.h>
  19. #endif
  20. #if defined(_POSIX_MEMLOCK_RANGE)
  21. #include <sys/resource.h>
  22. #endif
  23. #endif
  24. #endif
  25. #if defined(_WIN32)
  26. #define WIN32_LEAN_AND_MEAN
  27. #ifndef NOMINMAX
  28. #define NOMINMAX
  29. #endif
  30. #include <windows.h>
  31. #include <io.h>
  32. #include <stdio.h> // for _fseeki64
  33. #endif
  34. #define LLAMA_ASSERT(x) \
  35. do { \
  36. if (!(x)) { \
  37. fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
  38. abort(); \
  39. } \
  40. } while (0)
  41. #ifdef __GNUC__
  42. #ifdef __MINGW32__
  43. __attribute__((format(gnu_printf, 1, 2)))
  44. #else
  45. __attribute__((format(printf, 1, 2)))
  46. #endif
  47. #endif
  48. static std::string format(const char * fmt, ...) {
  49. va_list ap, ap2;
  50. va_start(ap, fmt);
  51. va_copy(ap2, ap);
  52. int size = vsnprintf(NULL, 0, fmt, ap);
  53. LLAMA_ASSERT(size >= 0 && size < INT_MAX);
  54. std::vector<char> buf(size + 1);
  55. int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
  56. LLAMA_ASSERT(size2 == size);
  57. va_end(ap2);
  58. va_end(ap);
  59. return std::string(buf.data(), size);
  60. }
  61. struct llama_file {
  62. // use FILE * so we don't have to re-open the file to mmap
  63. FILE * fp;
  64. size_t size;
  65. llama_file(const char * fname, const char * mode) {
  66. fp = std::fopen(fname, mode);
  67. if (fp == NULL) {
  68. throw format("failed to open %s: %s", fname, std::strerror(errno));
  69. }
  70. seek(0, SEEK_END);
  71. size = tell();
  72. seek(0, SEEK_SET);
  73. }
  74. size_t tell() const {
  75. #ifdef _WIN32
  76. __int64 ret = _ftelli64(fp);
  77. #else
  78. long ret = std::ftell(fp);
  79. #endif
  80. LLAMA_ASSERT(ret != -1); // this really shouldn't fail
  81. return (size_t) ret;
  82. }
  83. void seek(size_t offset, int whence) {
  84. #ifdef _WIN32
  85. int ret = _fseeki64(fp, (__int64) offset, whence);
  86. #else
  87. int ret = std::fseek(fp, (long) offset, whence);
  88. #endif
  89. LLAMA_ASSERT(ret == 0); // same
  90. }
  91. void read_raw(void * ptr, size_t size) {
  92. if (size == 0) {
  93. return;
  94. }
  95. errno = 0;
  96. std::size_t ret = std::fread(ptr, size, 1, fp);
  97. if (ferror(fp)) {
  98. throw format("read error: %s", strerror(errno));
  99. }
  100. if (ret != 1) {
  101. throw std::string("unexpectedly reached end of file");
  102. }
  103. }
  104. std::uint32_t read_u32() {
  105. std::uint32_t ret;
  106. read_raw(&ret, sizeof(ret));
  107. return ret;
  108. }
  109. std::string read_string(std::uint32_t len) {
  110. std::vector<char> chars(len);
  111. read_raw(chars.data(), len);
  112. return std::string(chars.data(), len);
  113. }
  114. void write_raw(const void * ptr, size_t size) {
  115. if (size == 0) {
  116. return;
  117. }
  118. errno = 0;
  119. size_t ret = std::fwrite(ptr, size, 1, fp);
  120. if (ret != 1) {
  121. throw format("write error: %s", strerror(errno));
  122. }
  123. }
  124. void write_u32(std::uint32_t val) {
  125. write_raw(&val, sizeof(val));
  126. }
  127. ~llama_file() {
  128. if (fp) {
  129. std::fclose(fp);
  130. }
  131. }
  132. };
#if defined(_WIN32)
// Convert a Win32 error code (e.g. from GetLastError()) into a human-readable
// message string.
static std::string llama_format_win_err(DWORD err) {
    LPSTR buf;
    // FORMAT_MESSAGE_ALLOCATE_BUFFER makes the system allocate `buf`; it must
    // be released with LocalFree below.
    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
    if (!size) {
        // formatting itself failed; return a fixed marker rather than throwing
        return "FormatMessageA failed";
    }
    std::string ret(buf, size);
    LocalFree(buf);
    return ret;
}
#endif
// Read-only memory mapping of a llama_file; unmaps in the destructor (RAII).
// SUPPORTED tells callers at compile time whether mapping works on this platform.
struct llama_mmap {
    void * addr;
    size_t size;

    llama_mmap(const llama_mmap &) = delete;

#ifdef _POSIX_MAPPED_FILES
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, bool prefetch = true) {
        size = file->size;
        int fd = fileno(file->fp);
        int flags = MAP_SHARED;
#ifdef __linux__
        // Linux-only: ask the kernel to fault the pages in up front
        flags |= MAP_POPULATE;
#endif
        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
        if (addr == MAP_FAILED) {
            throw format("mmap failed: %s", strerror(errno));
        }
        if (prefetch) {
            // Advise the kernel to preload the mapped memory
            if (madvise(addr, file->size, MADV_WILLNEED)) {
                // advisory only: warn and continue
                fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
                        strerror(errno));
            }
        }
    }

    ~llama_mmap() {
        munmap(addr, size);
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, bool prefetch = true) {
        size = file->size;

        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));

        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
        // capture the error code immediately, before another API call can overwrite it
        DWORD error = GetLastError();

        if (hMapping == NULL) {
            throw format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str());
        }

        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
        error = GetLastError();
        // the view keeps the mapping alive, so the mapping handle can be closed now
        CloseHandle(hMapping);

        if (addr == NULL) {
            throw format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str());
        }

#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
        if (prefetch) {
            // Advise the kernel to preload the mapped memory
            WIN32_MEMORY_RANGE_ENTRY range;
            range.VirtualAddress = addr;
            range.NumberOfBytes = (SIZE_T)size;
            if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                // advisory only: warn and continue
                fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
            }
        }
#else
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
    }

    ~llama_mmap() {
        if (!UnmapViewOfFile(addr)) {
            fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    // No mmap facility detected on this platform; constructing always throws.
    static constexpr bool SUPPORTED = false;

    llama_mmap(struct llama_file *, bool prefetch = true) {
        (void)prefetch;
        throw std::string("mmap not supported");
    }
#endif
};
// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
    void * addr = NULL;           // base address of the region (set once via init)
    size_t size = 0;              // number of bytes currently locked starting at addr
    bool failed_already = false;  // after one failure, later grow_to calls become no-ops

    llama_mlock() {}
    llama_mlock(const llama_mlock &) = delete;

    ~llama_mlock() {
        if (size) {
            raw_unlock(addr, size);
        }
    }

    // Record the base address; must be called before grow_to(), exactly once.
    void init(void * addr) {
        LLAMA_ASSERT(this->addr == NULL && this->size == 0);
        this->addr = addr;
    }

    // Grow the locked region to at least target_size bytes. Only the new tail
    // [addr+size, addr+target_size) is locked, so pages already locked are not
    // touched again.
    void grow_to(size_t target_size) {
        LLAMA_ASSERT(addr);
        if (failed_already) {
            return;
        }
        size_t granularity = lock_granularity();
        // round up to a multiple of the granularity (assumes it is a power of two)
        target_size = (target_size + granularity - 1) & ~(granularity - 1);
        if (target_size > size) {
            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
                size = target_size;
            } else {
                failed_already = true;
            }
        }
    }

#ifdef _POSIX_MEMLOCK_RANGE
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        return (size_t) sysconf(_SC_PAGESIZE);
    }

#ifdef __APPLE__
#define MLOCK_SUGGESTION \
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif

    // Returns true on success; on failure prints a warning (with a remediation
    // hint when the RLIMIT_MEMLOCK resource limit looks like the cause).
    bool raw_lock(const void * addr, size_t size) {
        if (!mlock(addr, size)) {
            return true;
        } else {
            char* errmsg = std::strerror(errno);
            bool suggest = (errno == ENOMEM);
            // Check if the resource limit is fine after all
            struct rlimit lock_limit;
            if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
                suggest = false;
            if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
                suggest = false;
            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                    size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
            return false;
        }
    }
#undef MLOCK_SUGGESTION

    void raw_unlock(void * addr, size_t size) {
        if (munlock(addr, size)) {
            fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        return (size_t) si.dwPageSize;
    }

    // VirtualLock can fail because the process working set is too small; on the
    // first failure the working set is enlarged and the lock retried once.
    bool raw_lock(void * addr, size_t size) {
        for (int tries = 1; ; tries++) {
            if (VirtualLock(addr, size)) {
                return true;
            }
            if (tries == 2) {
                // second failure: give up and report
                fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                        size, this->size, llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // It failed but this was only the first try; increase the working
            // set size and try again.
            SIZE_T min_ws_size, max_ws_size;
            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
                fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // Per MSDN: "The maximum number of pages that a process can lock
            // is equal to the number of pages in its minimum working set minus
            // a small overhead."
            // Hopefully a megabyte is enough overhead:
            size_t increment = size + 1048576;
            // The minimum must be <= the maximum, so we need to increase both:
            min_ws_size += increment;
            max_ws_size += increment;
            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
                fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
        }
    }

    void raw_unlock(void * addr, size_t size) {
        if (!VirtualUnlock(addr, size)) {
            fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    // No memory-locking facility detected: raw_lock always fails (with a
    // warning), so grow_to() degrades to a one-time no-op.
    static constexpr bool SUPPORTED = false;

    size_t lock_granularity() {
        return (size_t) 65536;
    }

    bool raw_lock(const void * addr, size_t size) {
        fprintf(stderr, "warning: mlock not supported on this system\n");
        return false;
    }

    void raw_unlock(const void * addr, size_t size) {}
#endif
};
  345. // Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
  346. struct llama_buffer {
  347. uint8_t * addr = NULL;
  348. size_t size = 0;
  349. llama_buffer() = default;
  350. void resize(size_t size) {
  351. delete[] addr;
  352. addr = new uint8_t[size];
  353. this->size = size;
  354. }
  355. ~llama_buffer() {
  356. delete[] addr;
  357. }
  358. // disable copy and move
  359. llama_buffer(const llama_buffer&) = delete;
  360. llama_buffer(llama_buffer&&) = delete;
  361. llama_buffer& operator=(const llama_buffer&) = delete;
  362. llama_buffer& operator=(llama_buffer&&) = delete;
  363. };
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
// CUDA-aware variant of llama_buffer: prefers host memory allocated through
// ggml_cuda_host_malloc (presumably pinned/page-locked for faster transfers —
// confirm against ggml-cuda.h) and falls back to a plain heap allocation when
// that fails.
struct llama_ctx_buffer {
    uint8_t * addr = NULL;
    // true when addr came from ggml_cuda_host_malloc; only meaningful while addr != NULL
    bool is_cuda;
    size_t size = 0;

    llama_ctx_buffer() = default;

    // Discard the current contents and allocate a fresh buffer of `size` bytes.
    void resize(size_t size) {
        free();
        addr = (uint8_t *) ggml_cuda_host_malloc(size);
        if (addr) {
            is_cuda = true;
        }
        else {
            // fall back to pageable memory
            addr = new uint8_t[size];
            is_cuda = false;
        }
        this->size = size;
    }

    // Release the buffer with the deallocator matching how it was obtained.
    void free() {
        if (addr) {
            if (is_cuda) {
                ggml_cuda_host_free(addr);
            }
            else {
                delete[] addr;
            }
        }
        addr = NULL;
    }

    ~llama_ctx_buffer() {
        free();
    }

    // disable copy and move
    llama_ctx_buffer(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer(llama_ctx_buffer&&) = delete;
    llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete;
};
#else
// Without cuBLAS there is no pinned-memory path; the plain buffer suffices.
typedef llama_buffer llama_ctx_buffer;
#endif
  407. #endif