#include "ggml.h"
#include "ggml-backend.h"
#include "../ggml/src/ggml-impl.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring> // for strlen, strncmp
#include <random>
#include <string>
#include <vector>

constexpr int offset_has_kv      = 1000;
constexpr int offset_has_tensors = 2000;
constexpr int offset_has_data    = 3000;

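// Each handcrafted_file_type encodes two things: the thousands offset selects which
// sections the file contains (KV pairs, tensor info, tensor data) and the remainder
// selects a specific defect; remainders of 800 and above mark files that are
// expected to parse successfully.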
enum handcrafted_file_type {
    HANDCRAFTED_HEADER_BAD_MAGIC           =  10,
    HANDCRAFTED_HEADER_BAD_VERSION_1       =  20,
    HANDCRAFTED_HEADER_BAD_VERSION_FUTURE  =  30,
    HANDCRAFTED_HEADER_BAD_N_TENSORS       =  40,
    HANDCRAFTED_HEADER_BAD_N_KV            =  50,
    HANDCRAFTED_HEADER_EMPTY               = 800,

    HANDCRAFTED_KV_BAD_KEY_SIZE            =  10 + offset_has_kv,
    HANDCRAFTED_KV_BAD_TYPE                =  20 + offset_has_kv,
    // HANDCRAFTED_KV_BAD_VALUE_SIZE       =  30 + offset_has_kv, // removed because it can result in allocations > 1 TB (default sanitizer limit)
    HANDCRAFTED_KV_DUPLICATE_KEY           =  40 + offset_has_kv,
    HANDCRAFTED_KV_BAD_ALIGN               =  50 + offset_has_kv,
    HANDCRAFTED_KV_SUCCESS                 = 800 + offset_has_kv,

    HANDCRAFTED_TENSORS_BAD_NAME_SIZE      =  10 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_N_DIMS         =  20 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_SHAPE          =  30 + offset_has_tensors,
    HANDCRAFTED_TENSORS_NE_TOO_BIG         =  40 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_TYPE           =  50 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_OFFSET         =  60 + offset_has_tensors,
    HANDCRAFTED_TENSORS_DUPLICATE_NAME     =  70 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_ALIGN          =  75 + offset_has_tensors,
    HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN =  80 + offset_has_tensors,
    HANDCRAFTED_TENSORS_SUCCESS            = 800 + offset_has_tensors,
    HANDCRAFTED_TENSORS_CUSTOM_ALIGN       = 810 + offset_has_tensors,

    HANDCRAFTED_DATA_NOT_ENOUGH_DATA       =  10 + offset_has_data,
    HANDCRAFTED_DATA_BAD_ALIGN             =  15 + offset_has_data,
    HANDCRAFTED_DATA_INCONSISTENT_ALIGN    =  20 + offset_has_data,
    HANDCRAFTED_DATA_SUCCESS               = 800 + offset_has_data,
    HANDCRAFTED_DATA_CUSTOM_ALIGN          = 810 + offset_has_data,
};
static std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
    switch (hft) {
        case HANDCRAFTED_HEADER_BAD_MAGIC:           return "HEADER_BAD_MAGIC";
        case HANDCRAFTED_HEADER_BAD_VERSION_1:       return "HEADER_BAD_VERSION_1";
        case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE:  return "HEADER_BAD_VERSION_FUTURE";
        case HANDCRAFTED_HEADER_BAD_N_KV:            return "HEADER_BAD_N_KV";
        case HANDCRAFTED_HEADER_BAD_N_TENSORS:       return "HEADER_BAD_N_TENSORS";
        case HANDCRAFTED_HEADER_EMPTY:               return "HEADER_EMPTY";

        case HANDCRAFTED_KV_BAD_KEY_SIZE:            return "KV_BAD_KEY_SIZE";
        case HANDCRAFTED_KV_BAD_TYPE:                return "KV_BAD_TYPE";
        case HANDCRAFTED_KV_DUPLICATE_KEY:           return "KV_DUPLICATE_KEY";
        case HANDCRAFTED_KV_BAD_ALIGN:               return "KV_BAD_ALIGN";
        case HANDCRAFTED_KV_SUCCESS:                 return "KV_RANDOM_KV";

        case HANDCRAFTED_TENSORS_BAD_NAME_SIZE:      return "TENSORS_BAD_NAME_SIZE";
        case HANDCRAFTED_TENSORS_BAD_N_DIMS:         return "TENSORS_BAD_N_DIMS";
        case HANDCRAFTED_TENSORS_BAD_SHAPE:          return "TENSORS_BAD_SHAPE";
        case HANDCRAFTED_TENSORS_NE_TOO_BIG:         return "TENSORS_NE_TOO_BIG";
        case HANDCRAFTED_TENSORS_BAD_TYPE:           return "TENSORS_BAD_TYPE";
        case HANDCRAFTED_TENSORS_BAD_OFFSET:         return "TENSORS_BAD_OFFSET";
        case HANDCRAFTED_TENSORS_DUPLICATE_NAME:     return "TENSORS_DUPLICATE_NAME";
        case HANDCRAFTED_TENSORS_BAD_ALIGN:          return "TENSORS_BAD_ALIGN";
        case HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN: return "TENSORS_INCONSISTENT_ALIGN";
        case HANDCRAFTED_TENSORS_SUCCESS:            return "TENSORS_SUCCESS";
        case HANDCRAFTED_TENSORS_CUSTOM_ALIGN:       return "TENSORS_CUSTOM_ALIGN";

        case HANDCRAFTED_DATA_NOT_ENOUGH_DATA:       return "DATA_NOT_ENOUGH_DATA";
        case HANDCRAFTED_DATA_BAD_ALIGN:             return "DATA_BAD_ALIGN";
        case HANDCRAFTED_DATA_INCONSISTENT_ALIGN:    return "DATA_INCONSISTENT_ALIGN";
        case HANDCRAFTED_DATA_SUCCESS:               return "DATA_SUCCESS";
        case HANDCRAFTED_DATA_CUSTOM_ALIGN:          return "DATA_CUSTOM_ALIGN";
    }
    GGML_ABORT("fatal error");
}

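// Whether gguf_init_from_file_impl is expected to return a non-null context:
// within each section, only file types at or above the section's success
// threshold (remainder 800) describe well-formed files.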
static bool expect_context_not_null(const enum handcrafted_file_type hft) {
    if (hft < offset_has_kv) {
        return hft >= HANDCRAFTED_HEADER_EMPTY;
    }
    if (hft < offset_has_tensors) {
        return hft >= HANDCRAFTED_KV_SUCCESS;
    }
    if (hft < offset_has_data) {
        return hft >= HANDCRAFTED_TENSORS_SUCCESS;
    }
    return hft >= HANDCRAFTED_DATA_SUCCESS;
}

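// None of the generated configurations are stored; the writer and the checkers
// below each re-derive them from an identically seeded RNG, so the draw order
// here must stay in sync across all callers.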
typedef std::pair<enum ggml_type, std::array<int64_t, GGML_MAX_DIMS>> tensor_config_t;

static std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) {
    std::vector<tensor_config_t> tensor_configs;
    tensor_configs.reserve(100);

    for (int i = 0; i < 100; ++i) {
        const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT);
        if (ggml_type_size(type) == 0) {
            continue;
        }

        std::array<int64_t, GGML_MAX_DIMS> shape = {1, 1, 1, 1};
        shape[0] = (1 + rng() % 10) * ggml_blck_size(type);
        const int n_dims = 1 + rng() % GGML_MAX_DIMS;
        for (int j = 1; j < n_dims; ++j) {
            shape[j] = 1 + rng() % 10;
        }

        tensor_configs.push_back(std::make_pair(type, shape));
    }

    return tensor_configs;
}

// Note: takes the RNG by reference (like get_tensor_configs) so that callers which
// reproduce the RNG state by replaying these calls stay in sync; see
// handcrafted_check_tensors below.
static std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937 & rng) {
    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
    kv_types.reserve(100);

    for (int i = 0; i < 100; ++i) {
        const gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);

        if (type == GGUF_TYPE_ARRAY) {
            const gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT);
            if (type_arr == GGUF_TYPE_ARRAY) {
                continue;
            }
            kv_types.push_back(std::make_pair(type, type_arr));
            continue;
        }

        kv_types.push_back(std::make_pair(type, gguf_type(-1)));
    }

    std::shuffle(kv_types.begin(), kv_types.end(), rng);

    return kv_types;
}

template <typename T>
static void helper_write(FILE * file, const T & val) {
    GGML_ASSERT(fwrite(&val, 1, sizeof(val), file) == sizeof(val));
}

static void helper_write(FILE * file, const void * data, const size_t nbytes) {
    GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes);
}

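// Writes a GGUF file to a tmpfile with the defect (or success case) selected by
// hft; extra_bytes of zero padding are appended past the logical end of the file.
// Returns the file rewound to the start, or null if no tmpfile could be created.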
static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) {
    FILE * file = tmpfile();

    if (!file) {
        return file;
    }

    std::mt19937 rng(seed);

    uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;

    if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) {
        const char bad_magic[4] = {'F', 'U', 'G', 'G'};
        helper_write(file, bad_magic, sizeof(bad_magic));
    } else {
        helper_write(file, GGUF_MAGIC, 4);
    }

    if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) {
        const uint32_t version = 1;
        helper_write(file, version);
    } else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) {
        const uint32_t version = GGUF_VERSION + 1;
        helper_write(file, version);
    } else {
        const uint32_t version = GGUF_VERSION;
        helper_write(file, version);
    }

    std::vector<tensor_config_t> tensor_configs;
    if (hft >= offset_has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }

    if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) {
        const uint64_t n_tensors = -1;
        helper_write(file, n_tensors);
    } else {
        const uint64_t n_tensors = tensor_configs.size();
        helper_write(file, n_tensors);
    }

    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
    if (hft >= offset_has_kv) {
        kv_types = get_kv_types(rng);
    }

    {
        uint64_t n_kv = kv_types.size();
        if (hft == HANDCRAFTED_KV_BAD_ALIGN      ||
            hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
            hft == HANDCRAFTED_DATA_BAD_ALIGN    || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
            n_kv += 1;
        } else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) {
            n_kv = -1;
        }
        helper_write(file, n_kv);
    }

    if (hft < offset_has_kv) {
        while (ftell(file) % alignment != 0) {
            const char pad = 0;
            helper_write(file, pad);
        }

        for (int i = 0; i < extra_bytes; ++i) {
            const char tmp = 0;
            helper_write(file, tmp);
        }
        rewind(file);
        return file;
    }

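    // write the KV pairs, substituting a bad key size, bad type, or duplicated key for the corresponding failure modes: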
    for (int i = 0; i < int(kv_types.size()); ++i) {
        const enum gguf_type type     = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].first);
        const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].second);

        const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i));

        if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) {
            const uint64_t n = -1;
            helper_write(file, n);
        } else {
            const uint64_t n = key.length();
            helper_write(file, n);
        }
        helper_write(file, key.data(), key.length());

        {
            const int32_t type32 = int32_t(type);
            helper_write(file, type32);
        }

        uint32_t data[16];
        for (int j = 0; j < 16; ++j) {
            data[j] = rng();
            if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) {
                data[j] |= 0x01010101; // avoid random null-termination of string
            }
        }

        if (type == GGUF_TYPE_STRING) {
            const uint64_t n = rng() % sizeof(data);
            helper_write(file, n);
            helper_write(file, data, n);
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            {
                const int32_t type32 = int32_t(type_arr);
                helper_write(file, type32);
            }

            if (type_arr == GGUF_TYPE_STRING) {
                const uint64_t nstr = rng() % (16 + 1);
                helper_write(file, nstr);
                for (uint64_t istr = 0; istr < nstr; ++istr) {
                    const uint64_t n = rng() % (sizeof(uint32_t) + 1);
                    helper_write(file, n);
                    helper_write(file, &data[istr], n);
                }
                continue;
            }

            const size_t type_size = gguf_type_size(type_arr);
            const uint64_t n = (rng() % sizeof(data)) / type_size;
            helper_write(file, n);
            helper_write(file, data, n*type_size);
            continue;
        }

        helper_write(file, data, hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type));
    }

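    // optionally append a general.alignment KV pair: 1 (valid) for the *_CUSTOM_ALIGN cases,
    // 13 (not a power of 2) for the *_BAD_ALIGN cases: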
    if (hft == HANDCRAFTED_KV_BAD_ALIGN      ||
        hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
        hft == HANDCRAFTED_DATA_BAD_ALIGN    || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
        const uint64_t n = strlen(GGUF_KEY_GENERAL_ALIGNMENT);
        helper_write(file, n);
        helper_write(file, GGUF_KEY_GENERAL_ALIGNMENT, n);

        const int32_t type = gguf_type(GGUF_TYPE_UINT32);
        helper_write(file, type);

        alignment = expect_context_not_null(hft) ? 1 : 13;
        helper_write(file, alignment);
    }

    if (hft < offset_has_tensors) {
        while (ftell(file) % alignment != 0) {
            const char pad = 0;
            helper_write(file, pad);
        }

        for (int i = 0; i < extra_bytes; ++i) {
            const char tmp = 0;
            helper_write(file, tmp);
        }
        rewind(file);
        return file;
    }

    if (hft == HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN || hft == HANDCRAFTED_DATA_INCONSISTENT_ALIGN) {
        alignment = 1;
    }

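    // write the tensor metadata: name, number of dimensions, shape, type, and data offset: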
    uint64_t offset = 0;
    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type                          type  = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        std::string name = "my_tensor";
        if (hft != HANDCRAFTED_TENSORS_DUPLICATE_NAME) {
            name += "_" + std::to_string(i);
        }
        if (hft == HANDCRAFTED_TENSORS_BAD_NAME_SIZE) {
            name += "_with_a_very_long_name_which_is_longer_than_what_is_allowed_for_ggml_tensors";
            GGML_ASSERT(name.length() >= GGML_MAX_NAME);
        }
        {
            const uint64_t n = name.length();
            helper_write(file, n);
        }
        helper_write(file, name.data(), name.length());

        uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1;
        for (int j = GGML_MAX_DIMS-1; j >= 1; --j) {
            if (shape[j] != 1) {
                n_dims = j + 1;
                break;
            }
        }
        if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) {
            const uint32_t n_dims_bad = GGML_MAX_DIMS + 1;
            helper_write(file, n_dims_bad);
        } else {
            helper_write(file, n_dims);
        }

        if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) {
            for (uint32_t j = 0; j < n_dims; ++j) {
                const int64_t bad_dim = -1;
                helper_write(file, bad_dim);
            }
        } else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG) {
            for (uint32_t j = 0; j < n_dims; ++j) {
                const int64_t big_dim = 4*int64_t(INT32_MAX);
                helper_write(file, big_dim);
            }
        } else {
            helper_write(file, shape.data(), n_dims*sizeof(int64_t));
        }

        {
            const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? GGML_TYPE_COUNT : int32_t(type);
            helper_write(file, type32);
        }

        if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) {
            const uint64_t bad_offset = -1;
            helper_write(file, bad_offset);
        } else {
            helper_write(file, offset);
        }

        int64_t ne = shape[0];
        for (uint32_t j = 1; j < n_dims; ++j) {
            ne *= shape[j];
        }
        offset += GGML_PAD(ggml_row_size(type, ne), alignment);
    }

    while (ftell(file) % alignment != 0) {
        const char pad = 0;
        helper_write(file, pad);
    }

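    // the tensor data is deterministic: byte i of the data section is simply i % 256,
    // so the checkers can verify it later without having to store it: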
    if (hft >= offset_has_data) {
        rng.seed(seed + 1);
        uint64_t nbytes = offset;
        if (hft == HANDCRAFTED_DATA_NOT_ENOUGH_DATA) {
            nbytes -= 1;
        }
        for (uint64_t i = 0; i < nbytes; ++i) {
            const uint8_t random_byte = i % 256;
            helper_write(file, random_byte);
        }
    }

    for (int i = 0; i < extra_bytes; ++i) {
        const char tmp = 0;
        helper_write(file, tmp);
    }

    rewind(file);
    return file;
}

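// The handcrafted_check_* helpers re-derive the expected file contents from the
// seed and compare them against what gguf_init_from_file_impl actually parsed.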
static bool handcrafted_check_header(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_kv, const bool has_tensors, const bool alignment_defined) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs;
    if (has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }

    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
    if (has_kv) {
        kv_types = get_kv_types(rng);
    }

    bool ok = true;

    if (gguf_get_version(gguf_ctx) != GGUF_VERSION) {
        ok = false;
    }
    if (gguf_get_n_tensors(gguf_ctx) != int(tensor_configs.size())) {
        ok = false;
    }
    if (gguf_get_n_kv(gguf_ctx) != int(alignment_defined ? kv_types.size() + 1 : kv_types.size())) {
        ok = false;
    }

    return ok;
}

static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_tensors, const bool alignment_defined) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs;
    if (has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }

    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types = get_kv_types(rng);

    bool ok = true;

    for (int i = 0; i < int(kv_types.size()); ++i) {
        const enum gguf_type type     = gguf_type(kv_types[i].first);
        const enum gguf_type type_arr = gguf_type(kv_types[i].second);

        const std::string key = "my_key_" + std::to_string(i);

        uint32_t data[16];
        for (int j = 0; j < 16; ++j) {
            data[j] = rng();
            if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) {
                data[j] |= 0x01010101; // avoid random null-termination of string
            }
        }

        const char * data8 = reinterpret_cast<const char *>(data);
        const int id = gguf_find_key(gguf_ctx, key.c_str());

        if (type == GGUF_TYPE_STRING) {
            const char * str = gguf_get_val_str(gguf_ctx, id);
            const uint64_t n = strlen(str);
            const uint64_t n_expected = rng() % sizeof(data);
            if (n != n_expected) {
                ok = false;
                continue;
            }
            if (!std::equal(str, str + n, data8)) {
                ok = false;
            }
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            const size_t type_size = gguf_type_size(type_arr);
            const uint64_t arr_n = gguf_get_arr_n(gguf_ctx, id);

            if (type_arr == GGUF_TYPE_STRING) {
                const uint64_t nstr_expected = rng() % (16 + 1);
                if (arr_n != nstr_expected) {
                    ok = false;
                    continue;
                }
                for (uint64_t istr = 0; istr < nstr_expected; ++istr) {
                    const char * str = gguf_get_arr_str(gguf_ctx, id, istr);
                    const uint64_t n = strlen(str);
                    const uint64_t n_expected = rng() % (sizeof(uint32_t) + 1);

                    if (n != n_expected) {
                        ok = false;
                        continue;
                    }
                    const char * str_expected = reinterpret_cast<const char *>(&data[istr]);
                    if (strncmp(str, str_expected, n) != 0) {
                        ok = false;
                        continue;
                    }
                }
                continue;
            }

            const uint64_t arr_n_expected = (rng() % sizeof(data)) / type_size;
            if (arr_n != arr_n_expected) {
                ok = false;
                continue;
            }

            const char * data_gguf = reinterpret_cast<const char *>(gguf_get_arr_data(gguf_ctx, id));

            if (type_arr == GGUF_TYPE_BOOL) {
                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
                    if (bool(data8[arr_i]) != bool(data_gguf[arr_i])) {
                        ok = false;
                    }
                }
                continue;
            }

            if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) {
                ok = false;
            }
            continue;
        }

        const char * data_gguf = reinterpret_cast<const char *>(gguf_get_val_data(gguf_ctx, id));

        if (type == GGUF_TYPE_BOOL) {
            if (bool(*data8) != bool(*data_gguf)) {
                ok = false;
            }
            continue;
        }

        if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) {
            ok = false;
        }
    }

    const uint32_t expected_alignment = alignment_defined ? 1 : GGUF_DEFAULT_ALIGNMENT;
    if (gguf_get_alignment(gguf_ctx) != expected_alignment) {
        ok = false;
    }

    return ok;
}

static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsigned int seed) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng);

    // Call get_kv_types to get the same RNG state:
    get_kv_types(rng);

    bool ok = true;

    const int id_alignment = gguf_find_key(gguf_ctx, GGUF_KEY_GENERAL_ALIGNMENT);
    const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT;

    uint64_t expected_offset = 0;
    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type                          type  = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        const std::string name = "my_tensor_" + std::to_string(i);
        const int id = gguf_find_tensor(gguf_ctx, name.c_str());

        if (id >= 0) {
            if (std::string(gguf_get_tensor_name(gguf_ctx, id)) != name) {
                ok = false;
            }
            if (gguf_get_tensor_type(gguf_ctx, id) != type) {
                ok = false;
            }
        } else {
            ok = false;
            continue;
        }

        const size_t offset = gguf_get_tensor_offset(gguf_ctx, id);
        if (offset != expected_offset) {
            ok = false;
        }

        int64_t ne = shape[0];
        for (size_t j = 1; j < GGML_MAX_DIMS; ++j) {
            ne *= shape[j];
        }
        expected_offset += GGML_PAD(ggml_row_size(type, ne), alignment);
    }

    return ok;
}

static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const unsigned int seed, FILE * file) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng);

    bool ok = true;

    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type                          type  = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        int64_t ne = shape[0];
        for (size_t j = 1; j < GGML_MAX_DIMS; ++j) {
            ne *= shape[j];
        }
        const size_t size = ggml_row_size(type, ne);

        const std::string name = "my_tensor_" + std::to_string(i);
        const size_t offset = gguf_get_tensor_offset(gguf_ctx, gguf_find_tensor(gguf_ctx, name.c_str()));

        std::vector<uint8_t> data(size);
        GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0);
        GGML_ASSERT(fread(data.data(), 1, data.size(), file) == data.size());

        for (size_t j = 0; j < size; ++j) {
            const uint8_t expected_byte = (j + offset) % 256;
            if (data[j] != expected_byte) {
                ok = false;
            }
        }
    }

    return ok;
}

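// Creates one handcrafted file per type and checks that gguf_init_from_file_impl
// accepts or rejects it as expected, running the content checks wherever a valid
// context is expected; returns (npass, ntest).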
static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
    int npass = 0;
    int ntest = 0;

    const std::vector<handcrafted_file_type> hfts = {
        HANDCRAFTED_HEADER_BAD_MAGIC,
        HANDCRAFTED_HEADER_BAD_VERSION_1,
        HANDCRAFTED_HEADER_BAD_VERSION_FUTURE,
        HANDCRAFTED_HEADER_BAD_N_KV,
        HANDCRAFTED_HEADER_BAD_N_TENSORS,
        HANDCRAFTED_HEADER_EMPTY,

        HANDCRAFTED_KV_BAD_KEY_SIZE,
        HANDCRAFTED_KV_BAD_TYPE,
        HANDCRAFTED_KV_DUPLICATE_KEY,
        HANDCRAFTED_KV_BAD_ALIGN,
        HANDCRAFTED_KV_SUCCESS,

        HANDCRAFTED_TENSORS_BAD_NAME_SIZE,
        HANDCRAFTED_TENSORS_BAD_N_DIMS,
        HANDCRAFTED_TENSORS_BAD_SHAPE,
        HANDCRAFTED_TENSORS_NE_TOO_BIG,
        HANDCRAFTED_TENSORS_BAD_TYPE,
        HANDCRAFTED_TENSORS_BAD_OFFSET,
        HANDCRAFTED_TENSORS_DUPLICATE_NAME,
        HANDCRAFTED_TENSORS_BAD_ALIGN,
        HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN,
        HANDCRAFTED_TENSORS_SUCCESS,
        HANDCRAFTED_TENSORS_CUSTOM_ALIGN,

        HANDCRAFTED_DATA_NOT_ENOUGH_DATA,
        HANDCRAFTED_DATA_BAD_ALIGN,
        HANDCRAFTED_DATA_INCONSISTENT_ALIGN,
        HANDCRAFTED_DATA_SUCCESS,
        HANDCRAFTED_DATA_CUSTOM_ALIGN,
    };

    for (enum handcrafted_file_type hft : hfts) {
        printf("%s: handcrafted_file_type=%s\n", __func__, handcrafted_file_type_name(hft).c_str());
        FILE * file = get_handcrafted_file(seed, hft);
#ifdef _WIN32
        if (!file) {
            printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__);
            printf("%s: skipping tests\n", __func__);
            continue;
        }
#else
        GGML_ASSERT(file);
#endif // _WIN32
        struct ggml_context * ctx = nullptr;
        struct gguf_init_params gguf_params = {
            /*no_alloc =*/ false,
            /*ctx      =*/ hft >= offset_has_data ? &ctx : nullptr,
        };

        struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params);

        if (expect_context_not_null(hft)) {
            printf("%s: - context_not_null: ", __func__);
        } else {
            printf("%s: - context_null: ", __func__);
        }
        if (bool(gguf_ctx) == expect_context_not_null(hft)) {
            printf("\033[1;32mOK\033[0m\n");
            npass++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }
        ntest++;

        if (hft >= offset_has_data && !expect_context_not_null(hft)) {
            printf("%s: - no_dangling_ggml_context_pointer: ", __func__);
            if (ctx) {
                printf("\033[1;31mFAIL\033[0m\n");
            } else {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            }
            ntest++;
        }

        const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN;

        if (expect_context_not_null(hft)) {
            printf("%s: - check_header: ", __func__);
            if (handcrafted_check_header(gguf_ctx, seed, hft >= offset_has_kv, hft >= offset_has_tensors, alignment_defined)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_kv) {
            printf("%s: - check_kv: ", __func__);
            if (handcrafted_check_kv(gguf_ctx, seed, hft >= offset_has_tensors, alignment_defined)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_tensors) {
            printf("%s: - check_tensors: ", __func__);
            if (handcrafted_check_tensors(gguf_ctx, seed)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_data) {
            printf("%s: - check_tensor_data: ", __func__);
            if (handcrafted_check_tensor_data(gguf_ctx, seed, file)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        fclose(file);
        if (gguf_ctx) {
            ggml_free(ctx);
            gguf_free(gguf_ctx);
        }
        printf("\n");
    }

    return std::make_pair(npass, ntest);
}

struct random_gguf_context_result {
    struct gguf_context * gguf_ctx;
    struct ggml_context * ctx;
    ggml_backend_buffer_t buffer;
};

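// Builds a gguf_context with random KV pairs and random tensors, with the tensor
// data allocated and filled on the given backend; the caller owns all three
// returned objects.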
static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t backend, const unsigned int seed) {
    std::mt19937 rng(seed);

    struct gguf_context * gguf_ctx = gguf_init_empty();

    for (int i = 0; i < 256; ++i) {
        const std::string key = "my_key_" + std::to_string(rng() % 1024);
        const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);

        switch (type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (gguf_ctx, key.c_str(), rng() % (1 << 7));              break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (gguf_ctx, key.c_str(), rng() % (1 << 7) - (1 << 6));   break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (gguf_ctx, key.c_str(), rng() % (1 << 15));             break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (gguf_ctx, key.c_str(), rng() % (1 << 15) - (1 << 14)); break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (gguf_ctx, key.c_str(), rng());                         break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (gguf_ctx, key.c_str(), rng() - (1 << 30));             break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (gguf_ctx, key.c_str(), rng() % 1024 - 512);            break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(gguf_ctx, key.c_str(), rng() % 2 == 0);                break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (gguf_ctx, key.c_str(), std::to_string(rng()).c_str()); break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (gguf_ctx, key.c_str(), rng());                         break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (gguf_ctx, key.c_str(), rng() - (1 << 30));             break;
            // use the f64 setter here so the key actually gets type FLOAT64 (the original called gguf_set_val_f32):
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (gguf_ctx, key.c_str(), rng() % 1024 - 512);            break;
            case GGUF_TYPE_ARRAY: {
                const enum gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT);
                const uint64_t ne = rng() % 1024;

                switch (type_arr) {
                    case GGUF_TYPE_UINT8:
                    case GGUF_TYPE_INT8:
                    case GGUF_TYPE_UINT16:
                    case GGUF_TYPE_INT16:
                    case GGUF_TYPE_UINT32:
                    case GGUF_TYPE_INT32:
                    case GGUF_TYPE_FLOAT32:
                    case GGUF_TYPE_BOOL:
                    case GGUF_TYPE_UINT64:
                    case GGUF_TYPE_INT64:
                    case GGUF_TYPE_FLOAT64: {
                        const size_t nbytes = ne*gguf_type_size(type_arr);
                        std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
                        for (size_t j = 0; j < random_data.size(); ++j) {
                            random_data[j] = rng();
                            if (type_arr == GGUF_TYPE_BOOL) {
                                random_data[j] &= 0x01010101; // the sanitizer complains if booleans are not 0 or 1
                            }
                        }
                        gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne);
                    } break;
                    case GGUF_TYPE_STRING: {
                        std::vector<std::string>  data_cpp(ne);
                        std::vector<const char *> data_c(ne);
                        for (size_t j = 0; j < data_cpp.size(); ++j) {
                            data_cpp[j] = std::to_string(rng());
                            data_c[j]   = data_cpp[j].c_str();
                        }
                        gguf_set_arr_str(gguf_ctx, key.c_str(), data_c.data(), ne);
                    } break;
                    case GGUF_TYPE_ARRAY: {
                        break; // not supported
                    }
                    case GGUF_TYPE_COUNT:
                    default: {
                        GGML_ABORT("fatal error");
                    }
                }
            } break;
            case GGUF_TYPE_COUNT:
            default: {
                GGML_ABORT("fatal error");
            }
        }
    }

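    // create random tensors (ne[0] is always a multiple of the type's block size)
    // and fill them with random data on the backend: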
    struct ggml_init_params ggml_params = {
        /*.mem_size   =*/ 256*ggml_tensor_overhead(),
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(ggml_params);

    for (int i = 0; i < 256; ++i) {
        const std::string name = "my_tensor_" + std::to_string(i);
        const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT);
        const size_t type_size = ggml_type_size(type);

        if (type_size == 0) {
            continue;
        }

        const int n_dims = 1 + rng() % GGML_MAX_DIMS;
        int64_t ne[GGML_MAX_DIMS];
        ne[0] = (1 + rng() % 10) * ggml_blck_size(type);
        for (int j = 1; j < n_dims; ++j) {
            ne[j] = 1 + rng() % 10;
        }

        struct ggml_tensor * tensor = ggml_new_tensor(ctx, type, n_dims, ne);
        ggml_set_name(tensor, name.c_str());
    }

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
        const size_t nbytes = ggml_nbytes(t);
        std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
        for (size_t j = 0; j < random_data.size(); ++j) {
            random_data[j] = rng();
        }
        ggml_backend_tensor_set(t, random_data.data(), 0, nbytes);

        gguf_add_tensor(gguf_ctx, t);
    }

    return {gguf_ctx, ctx, buf};
}

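// Returns whether every KV pair of ctx is also present in other with the same
// type and value; callers invoke it in both directions to test for equality.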
static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other) {
    bool ok = true;

    const int n_kv = gguf_get_n_kv(ctx);
    for (int id = 0; id < n_kv; ++id) {
        const char * name = gguf_get_key(ctx, id);

        const int idx_other = gguf_find_key(other, name);
        if (idx_other < 0) {
            ok = false;
            continue;
        }

        const gguf_type type = gguf_get_kv_type(ctx, id);
        if (type != gguf_get_kv_type(other, idx_other)) {
            ok = false;
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            const size_t arr_n = gguf_get_arr_n(ctx, id);
            if (arr_n != gguf_get_arr_n(other, idx_other)) {
                ok = false;
                continue;
            }

            const gguf_type type_arr = gguf_get_arr_type(ctx, id);
            if (type_arr != gguf_get_arr_type(other, idx_other)) {
                ok = false;
                continue;
            }

            if (type_arr == GGUF_TYPE_BOOL) {
                const int8_t * data       = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx, id));
                const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
                    if (bool(data[arr_i]) != bool(data_other[arr_i])) {
                        ok = false;
                    }
                }
                continue;
            }

            if (type_arr == GGUF_TYPE_STRING) {
                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
                    const std::string str       = gguf_get_arr_str(ctx, id, arr_i);
                    const std::string str_other = gguf_get_arr_str(other, idx_other, arr_i);
                    if (str != str_other) {
                        ok = false;
                    }
                }
                continue;
            }

            const int8_t * data       = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx, id));
            const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
            if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) {
                ok = false;
            }
            continue;
        }

        if (type == GGUF_TYPE_STRING) {
            const std::string str       = gguf_get_val_str(ctx, id);
            const std::string str_other = gguf_get_val_str(other, idx_other);
            if (str != str_other) {
                ok = false;
            }
            continue;
        }

        const char * data       = reinterpret_cast<const char *>(gguf_get_val_data(ctx, id));
        const char * data_other = reinterpret_cast<const char *>(gguf_get_val_data(other, idx_other));
        if (!std::equal(data, data + gguf_type_size(type), data_other)) {
            ok = false;
        }
    }

    return ok;
}

static bool all_tensors_in_other(const gguf_context * ctx, const gguf_context * other) {
    bool ok = true;

    const int n_tensors = gguf_get_n_tensors(ctx);
    for (int id = 0; id < n_tensors; ++id) {
        const std::string name = gguf_get_tensor_name(ctx, id);

        const int idx_other = gguf_find_tensor(other, name.c_str());
        if (id != idx_other) {
            ok = false;
            if (idx_other < 0) {
                continue;
            }
        }

        const ggml_type type = gguf_get_tensor_type(ctx, id);
        if (type != gguf_get_tensor_type(other, id)) {
            ok = false;
        }

        const size_t offset = gguf_get_tensor_offset(ctx, id);
        if (offset != gguf_get_tensor_offset(other, id)) {
            ok = false;
        }
    }

    return ok;
}

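// Compares tensor data between an original context and one read back from a file.
// In the read context, the first tensor is the single binary blob backing the
// whole data section, so it is identified by name and then skipped.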
static bool same_tensor_data(const struct ggml_context * orig, const struct ggml_context * read) {
    bool ok = true;

    struct ggml_tensor * t_orig = ggml_get_first_tensor(orig);
    struct ggml_tensor * t_read = ggml_get_first_tensor(read);

    if (std::string(t_read->name) != "GGUF tensor data binary blob") {
        return false;
    }
    t_read = ggml_get_next_tensor(read, t_read);

    while (t_orig) {
        if (!t_read) {
            ok = false;
            break;
        }

        const size_t nbytes = ggml_nbytes(t_orig);
        if (ggml_nbytes(t_read) != nbytes) {
            ok = false;
            break;
        }

        std::vector<char> data_orig(nbytes);
        ggml_backend_tensor_get(t_orig, data_orig.data(), 0, nbytes);
        if (!std::equal(data_orig.data(), data_orig.data() + nbytes, reinterpret_cast<const char *>(t_read->data))) {
            ok = false;
        }

        t_orig = ggml_get_next_tensor(orig, t_orig);
        t_read = ggml_get_next_tensor(read, t_read);
    }
    if (t_read) {
        ok = false;
    }

    return ok;
}

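// Round-trip test: write a random GGUF context to a tmpfile, read it back, and
// verify that the metadata (and, unless only_meta is set, the tensor data) survives.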
static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
    ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
    printf("%s: device=%s, backend=%s, only_meta=%s\n",
           __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no");

    int npass = 0;
    int ntest = 0;

    struct gguf_context * gguf_ctx_0;
    struct ggml_context * ctx_0;
    ggml_backend_buffer_t bbuf;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed);
        gguf_ctx_0 = result.gguf_ctx;
        ctx_0      = result.ctx;
        bbuf       = result.buffer;
    }

    FILE * file = tmpfile();
#ifdef _WIN32
    if (!file) {
        printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__);
        printf("%s: skipping tests\n", __func__);
        return std::make_pair(0, 0);
    }
#else
    GGML_ASSERT(file);
#endif // _WIN32
    {
        std::vector<int8_t> buf;
        gguf_write_to_buf(gguf_ctx_0, buf, only_meta);
        GGML_ASSERT(fwrite(buf.data(), 1, buf.size(), file) == buf.size());
        rewind(file);
    }

    struct ggml_context * ctx_1 = nullptr;
    struct gguf_init_params gguf_params = {
        /*no_alloc =*/ false,
        /*ctx      =*/ only_meta ? nullptr : &ctx_1,
    };
    struct gguf_context * gguf_ctx_1 = gguf_init_from_file_impl(file, gguf_params);

    printf("%s: same_version: ", __func__);
    if (gguf_get_version(gguf_ctx_0) == gguf_get_version(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: same_n_kv: ", __func__);
    if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: same_n_tensors: ", __func__);
    if (gguf_get_n_tensors(gguf_ctx_0) == gguf_get_n_tensors(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_orig_kv_in_read: ", __func__);
    if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_read_kv_in_orig: ", __func__);
    if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_orig_tensors_in_read: ", __func__);
    if (all_tensors_in_other(gguf_ctx_0, gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_read_tensors_in_orig: ", __func__);
    if (all_tensors_in_other(gguf_ctx_1, gguf_ctx_0)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    if (!only_meta) {
        printf("%s: same_tensor_data: ", __func__);
        if (same_tensor_data(ctx_0, ctx_1)) {
            printf("\033[1;32mOK\033[0m\n");
            npass++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }
        ntest++;
    }

    ggml_backend_buffer_free(bbuf);
    ggml_free(ctx_0);
    ggml_free(ctx_1);
    gguf_free(gguf_ctx_0);
    gguf_free(gguf_ctx_1);
    ggml_backend_free(backend);
    fclose(file);
    printf("\n");
    return std::make_pair(npass, ntest);
}

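// Tests gguf_set_kv, which copies the KV pairs of its second argument into its
// first: once into a context that already holds KV pairs and once into an empty one.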
static std::pair<int, int> test_gguf_set_kv(ggml_backend_dev_t dev, const unsigned int seed) {
    ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
    printf("%s: device=%s, backend=%s\n", __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend));

    int npass = 0;
    int ntest = 0;

    struct gguf_context * gguf_ctx_0;
    struct ggml_context * ctx_0;
    ggml_backend_buffer_t bbuf_0;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed);
        gguf_ctx_0 = result.gguf_ctx;
        ctx_0      = result.ctx;
        bbuf_0     = result.buffer;
    }

    struct gguf_context * gguf_ctx_1;
    struct ggml_context * ctx_1;
    ggml_backend_buffer_t bbuf_1;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed + 1);
        gguf_ctx_1 = result.gguf_ctx;
        ctx_1      = result.ctx;
        bbuf_1     = result.buffer;
    }

    struct gguf_context * gguf_ctx_2 = gguf_init_empty();

    gguf_set_kv(gguf_ctx_1, gguf_ctx_0);
    gguf_set_kv(gguf_ctx_2, gguf_ctx_0);

    printf("%s: same_n_kv: ", __func__);
    if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_2)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_kv_0_in_1: ", __func__);
    if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_kv_0_in_2: ", __func__);
    if (all_kv_in_other(gguf_ctx_0, gguf_ctx_2)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    gguf_set_kv(gguf_ctx_0, gguf_ctx_1);

    printf("%s: same_n_kv_after_double_copy: ", __func__);
    if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_kv_1_in_0_after_double_copy: ", __func__);
    if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    ggml_backend_buffer_free(bbuf_0);
    ggml_backend_buffer_free(bbuf_1);
    ggml_free(ctx_0);
    ggml_free(ctx_1);
    gguf_free(gguf_ctx_0);
    gguf_free(gguf_ctx_1);
    gguf_free(gguf_ctx_2);
    ggml_backend_free(backend);
    printf("\n");
    return std::make_pair(npass, ntest);
}

static void print_usage() {
    printf("usage: test-gguf [seed]\n");
    printf("  if no seed is specified then a random seed is used\n");
}

int main(int argc, char ** argv) {
    if (argc > 2) {
        print_usage();
        return 1;
    }

    std::random_device rd;
    const unsigned int seed = argc < 2 ? rd() : std::stoi(argv[1]);

    // Initialize ggml backends early so the prints aren't interleaved with the test results:
    ggml_backend_dev_count();
    fprintf(stderr, "\n");

    int npass = 0;
    int ntest = 0;

    {
        std::pair<int, int> result = test_handcrafted_file(seed);
        npass += result.first;
        ntest += result.second;
    }

    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);

        for (bool only_meta : {true, false}) {
            std::pair<int, int> result = test_roundtrip(dev, seed, only_meta);
            npass += result.first;
            ntest += result.second;
        }

        {
            std::pair<int, int> result = test_gguf_set_kv(dev, seed);
            npass += result.first;
            ntest += result.second;
        }
    }

    printf("%d/%d tests passed\n", npass, ntest);
    if (npass != ntest) {
        printf("\033[1;31mFAIL\033[0m\n");
        return 1;
    }
    printf("\033[1;32mOK\033[0m\n");
    return 0;
}