// gguf-split.cpp — split a GGUF model file into shards, or merge shards back into one file.
  1. #include "ggml.h"
  2. #include "gguf.h"
  3. #include "llama.h"
  4. #include "common.h"
  5. #include <algorithm>
  6. #include <cinttypes>
  7. #include <climits>
  8. #include <cstdio>
  9. #include <cstdlib>
  10. #include <stdexcept>
  11. #include <cstring>
  12. #include <fstream>
  13. #include <string>
  14. #include <vector>
  15. #if defined(_WIN32)
  16. #include <windows.h>
  17. #ifndef PATH_MAX
  18. #define PATH_MAX MAX_PATH
  19. #endif
  20. #include <io.h>
  21. #endif
// Top-level operation selected on the command line (--split / --merge).
enum split_operation : uint8_t {
    OP_NONE,  // not specified yet; defaults to OP_SPLIT after parsing
    OP_SPLIT, // split one GGUF into multiple shards
    OP_MERGE, // merge multiple shards back into a single GGUF
};
// How split boundaries are chosen (--split-max-tensors / --split-max-size).
enum split_mode : uint8_t {
    MODE_NONE,   // not specified yet; defaults to MODE_TENSOR after parsing
    MODE_TENSOR, // start a new shard every n_split_tensors tensors
    MODE_SIZE,   // start a new shard when accumulated tensor bytes would exceed n_bytes_split
};
// Options parsed from the command line.
struct split_params {
    split_operation operation = OP_NONE;
    split_mode mode = MODE_NONE;
    size_t n_bytes_split = 0;   // max tensor bytes per shard (MODE_SIZE); set by --split-max-size
    int n_split_tensors = 128;  // max tensors per shard (MODE_TENSOR); set by --split-max-tensors
    std::string input;          // input GGUF path (first shard when merging)
    std::string output;         // output path: shard-name prefix when splitting, merged file when merging
    bool no_tensor_first_split = false; // keep the first shard metadata-only
    bool dry_run = false;               // print the split plan without writing any files
};
  42. static void split_print_usage(const char * executable) {
  43. const split_params default_params;
  44. printf("\n");
  45. printf("usage: %s [options] GGUF_IN GGUF_OUT\n", executable);
  46. printf("\n");
  47. printf("Apply a GGUF operation on IN to OUT.");
  48. printf("\n");
  49. printf("options:\n");
  50. printf(" -h, --help show this help message and exit\n");
  51. printf(" --version show version and build info\n");
  52. printf(" --split split GGUF to multiple GGUF (enabled by default)\n");
  53. printf(" --merge merge multiple GGUF to a single GGUF\n");
  54. printf(" --split-max-tensors max tensors in each split (default: %d)\n", default_params.n_split_tensors);
  55. printf(" --split-max-size N(M|G) max size per split\n");
  56. printf(" --no-tensor-first-split do not add tensors to the first split (disabled by default)\n");
  57. printf(" --dry-run only print out a split plan and exit, without writing any new files\n");
  58. printf("\n");
  59. }
  60. // return convert string, for example "128M" or "4G" to number of bytes
  61. static size_t split_str_to_n_bytes(std::string str) {
  62. size_t n_bytes = 0;
  63. int n;
  64. if (str.back() == 'M') {
  65. sscanf(str.c_str(), "%d", &n);
  66. n_bytes = (size_t)n * 1000 * 1000; // megabytes
  67. } else if (str.back() == 'G') {
  68. sscanf(str.c_str(), "%d", &n);
  69. n_bytes = (size_t)n * 1000 * 1000 * 1000; // gigabytes
  70. } else {
  71. throw std::invalid_argument("error: supported units are M (megabytes) or G (gigabytes), but got: " + std::string(1, str.back()));
  72. }
  73. if (n <= 0) {
  74. throw std::invalid_argument("error: size must be a positive value");
  75. }
  76. return n_bytes;
  77. }
  78. static void split_params_parse_ex(int argc, const char ** argv, split_params & params) {
  79. std::string arg;
  80. const std::string arg_prefix = "--";
  81. bool invalid_param = false;
  82. int arg_idx = 1;
  83. for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
  84. arg = argv[arg_idx];
  85. if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
  86. std::replace(arg.begin(), arg.end(), '_', '-');
  87. }
  88. bool arg_found = false;
  89. if (arg == "-h" || arg == "--help") {
  90. split_print_usage(argv[0]);
  91. exit(0);
  92. } else if (arg == "--version") {
  93. fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
  94. fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
  95. exit(0);
  96. } else if (arg == "--dry-run") {
  97. arg_found = true;
  98. params.dry_run = true;
  99. } else if (arg == "--no-tensor-first-split") {
  100. arg_found = true;
  101. params.no_tensor_first_split = true;
  102. } else if (arg == "--merge") {
  103. arg_found = true;
  104. if (params.operation != OP_NONE && params.operation != OP_MERGE) {
  105. throw std::invalid_argument("error: either --split or --merge can be specified, but not both");
  106. }
  107. params.operation = OP_MERGE;
  108. } else if (arg == "--split") {
  109. arg_found = true;
  110. if (params.operation != OP_NONE && params.operation != OP_SPLIT) {
  111. throw std::invalid_argument("error: either --split or --merge can be specified, but not both");
  112. }
  113. params.operation = OP_SPLIT;
  114. } else if (arg == "--split-max-tensors") {
  115. if (++arg_idx >= argc) {
  116. invalid_param = true;
  117. break;
  118. }
  119. arg_found = true;
  120. if (params.mode != MODE_NONE && params.mode != MODE_TENSOR) {
  121. throw std::invalid_argument("error: either --split-max-tensors or --split-max-size can be specified, but not both");
  122. }
  123. params.mode = MODE_TENSOR;
  124. params.n_split_tensors = atoi(argv[arg_idx]);
  125. } else if (arg == "--split-max-size") {
  126. if (++arg_idx >= argc) {
  127. invalid_param = true;
  128. break;
  129. }
  130. arg_found = true;
  131. if (params.mode != MODE_NONE && params.mode != MODE_SIZE) {
  132. throw std::invalid_argument("error: either --split-max-tensors or --split-max-size can be specified, but not both");
  133. }
  134. params.mode = MODE_SIZE;
  135. params.n_bytes_split = split_str_to_n_bytes(argv[arg_idx]);
  136. }
  137. if (!arg_found) {
  138. throw std::invalid_argument("error: unknown argument: " + arg);
  139. }
  140. }
  141. // the operation is split if not specified
  142. if (params.operation == OP_NONE) {
  143. params.operation = OP_SPLIT;
  144. }
  145. // the split mode is by tensor if not specified
  146. if (params.mode == MODE_NONE) {
  147. params.mode = MODE_TENSOR;
  148. }
  149. if (invalid_param) {
  150. throw std::invalid_argument("error: invalid parameter for argument: " + arg);
  151. }
  152. if (argc - arg_idx != 2) {
  153. throw std::invalid_argument("error: bad arguments");
  154. }
  155. params.input = argv[arg_idx++];
  156. params.output = argv[arg_idx++];
  157. }
  158. static bool split_params_parse(int argc, const char ** argv, split_params & params) {
  159. bool result = true;
  160. try {
  161. split_params_parse_ex(argc, argv, params);
  162. }
  163. catch (const std::invalid_argument & ex) {
  164. fprintf(stderr, "%s\n", ex.what());
  165. split_print_usage(argv[0]);
  166. exit(EXIT_FAILURE);
  167. }
  168. return result;
  169. }
  170. static void zeros(std::ofstream & file, size_t n) {
  171. char zero = 0;
  172. for (size_t i = 0; i < n; ++i) {
  173. file.write(&zero, 1);
  174. }
  175. }
// Plans and performs the split of one input GGUF into multiple output shards.
// The constructor builds one gguf_context per output file up-front (so every
// shard's tensor list and split KVs are known before writing anything);
// write() then streams the tensor data from the input file into each shard.
struct split_strategy {
    const split_params params;
    std::ifstream & f_input;               // open input file; tensor data is streamed from here
    struct gguf_context * ctx_gguf;        // metadata of the input file (not owned)
    struct ggml_context * ctx_meta = NULL; // tensor descriptors of the input file (not owned)
    const int n_tensors;                   // total tensor count of the input
    // one ctx_out per one output file
    std::vector<struct gguf_context *> ctx_outs;
    // temporary buffer for reading in tensor data
    std::vector<uint8_t> read_buf;
    split_strategy(const split_params & params,
            std::ifstream & f_input,
            struct gguf_context * ctx_gguf,
            struct ggml_context * ctx_meta) :
        params(params),
        f_input(f_input),
        ctx_gguf(ctx_gguf),
        ctx_meta(ctx_meta),
        n_tensors(gguf_get_n_tensors(ctx_gguf)) {
        // because we need to know list of tensors for each file in advance, we will build all the ctx_out for all output splits
        int i_split = -1;
        struct gguf_context * ctx_out = NULL;
        // Finalize the current shard (if any) and start a new one.
        // allow_no_tensors permits an empty shard (used for --no-tensor-first-split).
        auto new_ctx_out = [&](bool allow_no_tensors) {
            i_split++;
            if (ctx_out != NULL) {
                if (gguf_get_n_tensors(ctx_out) == 0 && !allow_no_tensors) {
                    fprintf(stderr, "error: one of splits have 0 tensors. Maybe size or tensors limit is too small\n");
                    exit(EXIT_FAILURE);
                }
                ctx_outs.push_back(ctx_out);
            }
            ctx_out = gguf_init_empty();
            // Save all metadata in first split only
            if (i_split == 0) {
                gguf_set_kv(ctx_out, ctx_gguf);
            }
            gguf_set_val_u16(ctx_out, LLM_KV_SPLIT_NO, i_split);
            gguf_set_val_u16(ctx_out, LLM_KV_SPLIT_COUNT, 0); // placeholder, patched below once the count is known
            gguf_set_val_i32(ctx_out, LLM_KV_SPLIT_TENSORS_COUNT, n_tensors);
        };
        // initialize ctx_out for the first split
        new_ctx_out(false);
        // skip first split if no_tensor_first_split is set
        if (params.no_tensor_first_split) {
            new_ctx_out(true);
        }
        // process tensors one by one
        size_t curr_tensors_size = 0; // current size by counting only tensors size (without metadata)
        for (int i = 0; i < n_tensors; ++i) {
            struct ggml_tensor * t = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
            // calculate the "imaginary" size = the current size + next tensor size
            size_t n_bytes = GGML_PAD(ggml_nbytes(t), GGUF_DEFAULT_ALIGNMENT);
            size_t next_tensors_size = curr_tensors_size + n_bytes;
            if (should_split(i, next_tensors_size)) {
                new_ctx_out(false);
                curr_tensors_size = n_bytes; // current tensor becomes the first of the new shard
            } else {
                curr_tensors_size = next_tensors_size;
            }
            gguf_add_tensor(ctx_out, t);
        }
        // push the last ctx_out
        ctx_outs.push_back(ctx_out);
        // set the correct n_split for all ctx_out
        for (auto & ctx : ctx_outs) {
            gguf_set_val_u16(ctx, LLM_KV_SPLIT_COUNT, ctx_outs.size());
        }
    }
    ~split_strategy() {
        // the per-shard contexts are owned here; ctx_gguf/ctx_meta are not
        for (auto & ctx_out : ctx_outs) {
            gguf_free(ctx_out);
        }
    }
    // Decide whether tensor i_tensor should start a new shard, given the
    // accumulated size (next_size) the current shard would reach with it.
    bool should_split(int i_tensor, size_t next_size) {
        if (params.mode == MODE_SIZE) {
            // split by max size per file
            return next_size > params.n_bytes_split;
        } else if (params.mode == MODE_TENSOR) {
            // split by number of tensors per file
            return i_tensor > 0 && i_tensor < n_tensors && i_tensor % params.n_split_tensors == 0;
        }
        // should never happen
        GGML_ABORT("invalid mode");
    }
    // Print the split plan: number of shards, tensors and size per shard.
    void print_info() {
        printf("n_split: %zu\n", ctx_outs.size());
        int i_split = 0;
        for (auto & ctx_out : ctx_outs) {
            // re-calculate the real gguf size for each split (= metadata size + total size of all tensors)
            size_t total_size = gguf_get_meta_size(ctx_out);
            for (int i = 0; i < gguf_get_n_tensors(ctx_out); ++i) {
                struct ggml_tensor * t = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_out, i));
                total_size += ggml_nbytes(t);
            }
            total_size = total_size / 1000 / 1000; // convert to megabytes
            printf("split %05d: n_tensors = %" PRIi64 ", total_size = %zuM\n", i_split + 1, gguf_get_n_tensors(ctx_out), total_size);
            i_split++;
        }
    }
    // Write every shard to disk: metadata first, then each tensor's data
    // copied from the input file, padded to GGUF_DEFAULT_ALIGNMENT.
    void write() {
        int i_split = 0;
        int n_split = ctx_outs.size();
        for (auto & ctx_out : ctx_outs) {
            // construct file path
            char split_path[PATH_MAX] = {0};
            llama_split_path(split_path, sizeof(split_path), params.output.c_str(), i_split, n_split);
            // open the output file
            printf("Writing file %s ... ", split_path);
            fflush(stdout);
            std::ofstream fout = std::ofstream(split_path, std::ios::binary);
            fout.exceptions(std::ofstream::failbit); // fail fast on write errors
            // write metadata
            std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
            gguf_get_meta_data(ctx_out, data.data());
            fout.write((const char *)data.data(), data.size());
            // write tensors
            for (int i = 0; i < gguf_get_n_tensors(ctx_out); ++i) {
                // read tensor meta and prepare buffer
                const char * t_name = gguf_get_tensor_name(ctx_out, i);
                struct ggml_tensor * t = ggml_get_tensor(ctx_meta, t_name);
                auto n_bytes = ggml_nbytes(t);
                read_buf.resize(n_bytes);
                // calculate offset
                auto i_tensor_in = gguf_find_tensor(ctx_gguf, t_name); // idx of tensor in the input file
                auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor_in);
                // copy tensor from input to output file
                copy_file_to_file(f_input, fout, offset, n_bytes);
                zeros(fout, GGML_PAD(n_bytes, GGUF_DEFAULT_ALIGNMENT) - n_bytes);
            }
            printf("done\n");
            // close the file
            fout.close();
            i_split++;
        }
    }
    // Copy len bytes from f_in (starting at in_offset) to the current position
    // of f_out, staging through read_buf.
    void copy_file_to_file(std::ifstream & f_in, std::ofstream & f_out, const size_t in_offset, const size_t len) {
        // TODO: detect OS and use copy_file_range() here for better performance
        if (read_buf.size() < len) {
            read_buf.resize(len);
        }
        f_in.seekg(in_offset);
        f_in.read((char *)read_buf.data(), len);
        f_out.write((const char *)read_buf.data(), len);
    }
};
  321. static void gguf_split(const split_params & split_params) {
  322. struct ggml_context * ctx_meta = NULL;
  323. struct gguf_init_params params = {
  324. /*.no_alloc = */ true,
  325. /*.ctx = */ &ctx_meta,
  326. };
  327. std::ifstream f_input(split_params.input.c_str(), std::ios::binary);
  328. if (!f_input.is_open()) {
  329. fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_params.input.c_str());
  330. exit(EXIT_FAILURE);
  331. }
  332. auto * ctx_gguf = gguf_init_from_file(split_params.input.c_str(), params);
  333. if (!ctx_gguf) {
  334. fprintf(stderr, "%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
  335. exit(EXIT_FAILURE);
  336. }
  337. // prepare the strategy
  338. split_strategy strategy(split_params, f_input, ctx_gguf, ctx_meta);
  339. int n_split = strategy.ctx_outs.size();
  340. strategy.print_info();
  341. if (!split_params.dry_run) {
  342. // write all output splits
  343. strategy.write();
  344. }
  345. // done, clean up
  346. gguf_free(ctx_gguf);
  347. f_input.close();
  348. fprintf(stderr, "%s: %d gguf split written with a total of %d tensors.\n",
  349. __func__, n_split, strategy.n_tensors);
  350. }
// Merge a set of GGUF shards (named by the first shard, split_params.input)
// back into a single GGUF at split_params.output. Two passes: first collect
// KV + tensor metadata from every shard, then stream all tensor data.
// Exits the process on any failure.
static void gguf_merge(const split_params & split_params) {
    fprintf(stderr, "%s: %s -> %s\n",
            __func__, split_params.input.c_str(),
            split_params.output.c_str());
    int n_split = 1;       // updated from the first shard's LLM_KV_SPLIT_COUNT
    int total_tensors = 0;
    // avoid overwriting existing output file
    if (std::ifstream(split_params.output.c_str())) {
        fprintf(stderr, "%s: output file %s already exists\n", __func__, split_params.output.c_str());
        exit(EXIT_FAILURE);
    }
    std::ofstream fout(split_params.output.c_str(), std::ios::binary);
    fout.exceptions(std::ofstream::failbit); // fail fast on write errors
    auto * ctx_out = gguf_init_empty();      // merged metadata accumulates here
    std::vector<uint8_t> read_data;          // reusable tensor-data staging buffer
    std::vector<ggml_context *> ctx_metas;   // per-shard tensor descriptors (kept alive until pass 2)
    std::vector<gguf_context *> ctx_ggufs;   // per-shard gguf metadata (kept alive until pass 2)
    char split_path[PATH_MAX] = {0};
    strncpy(split_path, split_params.input.c_str(), sizeof(split_path) - 1);
    char split_prefix[PATH_MAX] = {0};       // shard-name prefix extracted from the first shard
    // First pass to find KV and tensors metadata
    // NOTE: n_split grows after i_split == 0, extending the loop to all shards.
    for (int i_split = 0; i_split < n_split; i_split++) {
        struct ggml_context * ctx_meta = NULL;
        struct gguf_init_params params = {
            /*.no_alloc = */ true, // metadata only; data is read in the second pass
            /*.ctx = */ &ctx_meta,
        };
        if (i_split > 0) {
            llama_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split);
        }
        fprintf(stderr, "%s: reading metadata %s ...", __func__, split_path);
        auto * ctx_gguf = gguf_init_from_file(split_path, params);
        if (!ctx_gguf) {
            fprintf(stderr, "\n%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
            exit(EXIT_FAILURE);
        }
        ctx_ggufs.push_back(ctx_gguf);
        ctx_metas.push_back(ctx_meta);
        if (i_split == 0) {
            // the first shard must carry the split-count KV and the full model metadata
            auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_SPLIT_COUNT);
            if (key_n_split < 0) {
                fprintf(stderr,
                        "\n%s: input file does not contain %s metadata\n",
                        __func__,
                        LLM_KV_SPLIT_COUNT);
                gguf_free(ctx_gguf);
                ggml_free(ctx_meta);
                gguf_free(ctx_out);
                fout.close();
                exit(EXIT_FAILURE);
            }
            n_split = gguf_get_val_u16(ctx_gguf, key_n_split);
            if (n_split < 1) {
                fprintf(stderr,
                        "\n%s: input file does not contain a valid split count %d\n",
                        __func__,
                        n_split);
                gguf_free(ctx_gguf);
                ggml_free(ctx_meta);
                gguf_free(ctx_out);
                fout.close();
                exit(EXIT_FAILURE);
            }
            // Verify the file naming and extract split_prefix
            if (!llama_split_prefix(split_prefix, sizeof (split_prefix), split_path, i_split, n_split)) {
                fprintf(stderr, "\n%s: unexpected input file name: %s"
                        " i_split=%d"
                        " n_split=%d\n", __func__,
                        split_path, i_split, n_split);
                gguf_free(ctx_gguf);
                ggml_free(ctx_meta);
                gguf_free(ctx_out);
                fout.close();
                exit(EXIT_FAILURE);
            }
            // Do not trigger merge if we try to merge again the output
            gguf_set_val_u16(ctx_gguf, LLM_KV_SPLIT_COUNT, 0);
            // Set metadata from the first split
            gguf_set_kv(ctx_out, ctx_gguf);
        }
        // register every tensor of this shard in the merged metadata
        auto n_tensors = gguf_get_n_tensors(ctx_gguf);
        for (int i_tensor = 0; i_tensor < n_tensors; i_tensor++) {
            const char * t_name = gguf_get_tensor_name(ctx_gguf, i_tensor);
            struct ggml_tensor * t = ggml_get_tensor(ctx_meta, t_name);
            gguf_add_tensor(ctx_out, t);
        }
        total_tensors += n_tensors;
        fprintf(stderr, "\033[3Ddone\n"); // overwrite the trailing "..." with "done"
    }
    // placeholder for the meta data
    // (the real metadata is written over it at the end, once tensor offsets are final)
    {
        auto meta_size = gguf_get_meta_size(ctx_out);
        ::zeros(fout, meta_size);
    }
    // Write tensors data
    for (int i_split = 0; i_split < n_split; i_split++) {
        llama_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split);
        std::ifstream f_input(split_path, std::ios::binary);
        if (!f_input.is_open()) {
            fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_path);
            for (uint32_t i = 0; i < ctx_ggufs.size(); i++) {
                gguf_free(ctx_ggufs[i]);
                ggml_free(ctx_metas[i]);
            }
            gguf_free(ctx_out);
            fout.close();
            exit(EXIT_FAILURE);
        }
        fprintf(stderr, "%s: writing tensors %s ...", __func__, split_path);
        auto * ctx_gguf = ctx_ggufs[i_split];
        auto * ctx_meta = ctx_metas[i_split];
        auto n_tensors = gguf_get_n_tensors(ctx_gguf);
        for (int i_tensor = 0; i_tensor < n_tensors; i_tensor++) {
            const char * t_name = gguf_get_tensor_name(ctx_gguf, i_tensor);
            struct ggml_tensor * t = ggml_get_tensor(ctx_meta, t_name);
            auto n_bytes = ggml_nbytes(t);
            if (read_data.size() < n_bytes) {
                read_data.resize(n_bytes);
            }
            auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor);
            f_input.seekg(offset);
            f_input.read((char *)read_data.data(), n_bytes);
            // write tensor data + padding
            fout.write((const char *)read_data.data(), n_bytes);
            zeros(fout, GGML_PAD(n_bytes, GGUF_DEFAULT_ALIGNMENT) - n_bytes);
        }
        // this shard's metadata is no longer needed once its data is copied
        gguf_free(ctx_gguf);
        ggml_free(ctx_meta);
        f_input.close();
        fprintf(stderr, "\033[3Ddone\n");
    }
    {
        // go back to beginning of file and write the updated metadata
        fout.seekp(0);
        std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
        gguf_get_meta_data(ctx_out, data.data());
        fout.write((const char *)data.data(), data.size());
        fout.close();
        gguf_free(ctx_out);
    }
    fprintf(stderr, "%s: %s merged from %d split with %d tensors.\n",
            __func__, split_params.output.c_str(), n_split, total_tensors);
}
  494. int main(int argc, const char ** argv) {
  495. split_params params;
  496. split_params_parse(argc, argv, params);
  497. switch (params.operation) {
  498. case OP_SPLIT: gguf_split(params);
  499. break;
  500. case OP_MERGE: gguf_merge(params);
  501. break;
  502. default: split_print_usage(argv[0]);
  503. exit(EXIT_FAILURE);
  504. }
  505. return 0;
  506. }