// test-gguf.cpp
#include "ggml.h"
#include "ggml-backend.h"
#include "../ggml/src/ggml-impl.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <random>
#include <string>
#include <vector>
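
// The enum values below encode both the failure mode under test and how much of a
// valid GGUF file gets written: values < offset_has_kv stop after the header,
// values < offset_has_tensors stop after the KV pairs, values < offset_has_data
// stop after the tensor metadata, and larger values also include tensor data.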
constexpr int offset_has_kv      = 1000;
constexpr int offset_has_tensors = 2000;
constexpr int offset_has_data    = 3000;

enum handcrafted_file_type {
    HANDCRAFTED_HEADER_BAD_MAGIC          =  10,
    HANDCRAFTED_HEADER_BAD_VERSION_1      =  20,
    HANDCRAFTED_HEADER_BAD_VERSION_FUTURE =  30,
    HANDCRAFTED_HEADER_BAD_N_TENSORS      =  40,
    HANDCRAFTED_HEADER_BAD_N_KV           =  50,
    HANDCRAFTED_HEADER_EMPTY              = 800,

    HANDCRAFTED_KV_BAD_KEY_SIZE           =  10 + offset_has_kv,
    HANDCRAFTED_KV_BAD_TYPE               =  20 + offset_has_kv,
    HANDCRAFTED_KV_BAD_VALUE_SIZE         =  30 + offset_has_kv,
    HANDCRAFTED_KV_DUPLICATE_KEY          =  40 + offset_has_kv,
    HANDCRAFTED_KV_SUCCESS                = 800 + offset_has_kv,

    HANDCRAFTED_TENSORS_BAD_NAME_SIZE     =  10 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_N_DIMS        =  20 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_SHAPE         =  30 + offset_has_tensors,
    HANDCRAFTED_TENSORS_NE_TOO_BIG        =  40 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_TYPE          =  50 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_OFFSET        =  60 + offset_has_tensors,
    HANDCRAFTED_TENSORS_DUPLICATE_NAME    =  70 + offset_has_tensors,
    HANDCRAFTED_TENSORS_BAD_ALIGNMENT     =  80 + offset_has_tensors,
    HANDCRAFTED_TENSORS_SUCCESS           = 800 + offset_has_tensors,
    HANDCRAFTED_TENSORS_CUSTOM_ALIGN      = 810 + offset_has_tensors,

    HANDCRAFTED_DATA_NOT_ENOUGH_DATA      =  10 + offset_has_data,
    HANDCRAFTED_DATA_BAD_ALIGNMENT        =  20 + offset_has_data,
    HANDCRAFTED_DATA_SUCCESS              = 800 + offset_has_data,
    HANDCRAFTED_DATA_CUSTOM_ALIGN         = 810 + offset_has_data,
};
static std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
    switch (hft) {
        case HANDCRAFTED_HEADER_BAD_MAGIC:          return "HEADER_BAD_MAGIC";
        case HANDCRAFTED_HEADER_BAD_VERSION_1:      return "HEADER_BAD_VERSION_1";
        case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE: return "HEADER_BAD_VERSION_FUTURE";
        case HANDCRAFTED_HEADER_BAD_N_KV:           return "HEADER_BAD_N_KV";
        case HANDCRAFTED_HEADER_BAD_N_TENSORS:      return "HEADER_BAD_N_TENSORS";
        case HANDCRAFTED_HEADER_EMPTY:              return "HEADER_EMPTY";

        case HANDCRAFTED_KV_BAD_KEY_SIZE:           return "KV_BAD_KEY_SIZE";
        case HANDCRAFTED_KV_BAD_TYPE:               return "KV_BAD_TYPE";
        case HANDCRAFTED_KV_BAD_VALUE_SIZE:         return "KV_BAD_VALUE_SIZE";
        case HANDCRAFTED_KV_DUPLICATE_KEY:          return "KV_DUPLICATE_KEY";
        case HANDCRAFTED_KV_SUCCESS:                return "KV_SUCCESS";

        case HANDCRAFTED_TENSORS_BAD_NAME_SIZE:     return "TENSORS_BAD_NAME_SIZE";
        case HANDCRAFTED_TENSORS_BAD_N_DIMS:        return "TENSORS_BAD_N_DIMS";
        case HANDCRAFTED_TENSORS_BAD_SHAPE:         return "TENSORS_BAD_SHAPE";
        case HANDCRAFTED_TENSORS_NE_TOO_BIG:        return "TENSORS_NE_TOO_BIG";
        case HANDCRAFTED_TENSORS_BAD_TYPE:          return "TENSORS_BAD_TYPE";
        case HANDCRAFTED_TENSORS_BAD_OFFSET:        return "TENSORS_BAD_OFFSET";
        case HANDCRAFTED_TENSORS_DUPLICATE_NAME:    return "TENSORS_DUPLICATE_NAME";
        case HANDCRAFTED_TENSORS_BAD_ALIGNMENT:     return "TENSORS_BAD_ALIGNMENT";
        case HANDCRAFTED_TENSORS_SUCCESS:           return "TENSORS_SUCCESS";
        case HANDCRAFTED_TENSORS_CUSTOM_ALIGN:      return "TENSORS_CUSTOM_ALIGN";

        case HANDCRAFTED_DATA_NOT_ENOUGH_DATA:      return "DATA_NOT_ENOUGH_DATA";
        case HANDCRAFTED_DATA_BAD_ALIGNMENT:        return "DATA_BAD_ALIGNMENT";
        case HANDCRAFTED_DATA_SUCCESS:              return "DATA_SUCCESS";
        case HANDCRAFTED_DATA_CUSTOM_ALIGN:         return "DATA_CUSTOM_ALIGN";
    }
    GGML_ABORT("fatal error");
}
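
// Within each tier, only the HEADER_EMPTY, *_SUCCESS, and *_CUSTOM_ALIGN variants
// describe well-formed files, so those are exactly the cases where
// gguf_init_from_file_impl is expected to return a non-null context.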
static bool expect_context_not_null(const enum handcrafted_file_type hft) {
    if (hft < offset_has_kv) {
        return hft >= HANDCRAFTED_HEADER_EMPTY;
    }
    if (hft < offset_has_tensors) {
        return hft >= HANDCRAFTED_KV_SUCCESS;
    }
    if (hft < offset_has_data) {
        return hft >= HANDCRAFTED_TENSORS_SUCCESS;
    }
    return hft >= HANDCRAFTED_DATA_SUCCESS;
}
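
// Generate up to 100 random tensor configurations; the first dimension is always a
// multiple of the block size so that quantized types get valid shapes.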
typedef std::pair<enum ggml_type, std::array<int64_t, GGML_MAX_DIMS>> tensor_config_t;

static std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) {
    std::vector<tensor_config_t> tensor_configs;
    tensor_configs.reserve(100);

    for (int i = 0; i < 100; ++i) {
        const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT);
        if (ggml_type_size(type) == 0) {
            continue;
        }

        std::array<int64_t, GGML_MAX_DIMS> shape = {1, 1, 1, 1};
        shape[0] = (1 + rng() % 10) * ggml_blck_size(type);
        const int n_dims = 1 + rng() % GGML_MAX_DIMS;
        for (int j = 1; j < n_dims; ++j) {
            shape[j] = 1 + rng() % 10;
        }

        tensor_configs.push_back(std::make_pair(type, shape));
    }

    return tensor_configs;
}
// Pass rng by reference so that callers see the advanced RNG state afterwards
// (handcrafted_check_tensors relies on this to stay in sync with the writer):
static std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937 & rng) {
    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
    kv_types.reserve(100);

    for (int i = 0; i < 100; ++i) {
        const gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);

        if (type == GGUF_TYPE_ARRAY) {
            const gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT);
            if (type_arr == GGUF_TYPE_ARRAY) {
                continue; // nested arrays are not supported
            }
            kv_types.push_back(std::make_pair(type, type_arr));
            continue;
        }

        kv_types.push_back(std::make_pair(type, gguf_type(-1)));
    }
    std::shuffle(kv_types.begin(), kv_types.end(), rng);

    return kv_types;
}
static void helper_write(const void * data, const size_t nbytes, FILE * file) {
    GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes);
}
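
// Write a handcrafted GGUF file to a tmpfile(), corrupted in the way selected by
// hft; extra_bytes appends trailing zero bytes to check that a file with data past
// its expected end is rejected.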
static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) {
    FILE * file = tmpfile();
    std::mt19937 rng(seed);

    // header
    if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) {
        const char bad_magic[4] = {'F', 'U', 'G', 'G'};
        helper_write(bad_magic, sizeof(bad_magic), file);
    } else {
        helper_write(GGUF_MAGIC, 4, file);
    }

    if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) {
        const uint32_t version = 1;
        helper_write(&version, sizeof(version), file);
    } else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) {
        const uint32_t version = GGUF_VERSION + 1;
        helper_write(&version, sizeof(version), file);
    } else {
        const uint32_t version = GGUF_VERSION;
        helper_write(&version, sizeof(version), file);
    }

    std::vector<tensor_config_t> tensor_configs;
    if (hft >= offset_has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }

    if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) {
        const uint64_t n_tensors = -1;
        helper_write(&n_tensors, sizeof(n_tensors), file);
    } else {
        const uint64_t n_tensors = tensor_configs.size();
        helper_write(&n_tensors, sizeof(n_tensors), file);
    }

    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
    if (hft >= offset_has_kv) {
        kv_types = get_kv_types(rng);
    }
    {
        uint64_t n_kv = kv_types.size();
        if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
            n_kv += 1; // the general.alignment key written below
        } else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) {
            n_kv = -1;
        }
        helper_write(&n_kv, sizeof(n_kv), file);
    }

    if (hft < offset_has_kv) {
        for (int i = 0; i < extra_bytes; ++i) {
            const char tmp = 0;
            helper_write(&tmp, sizeof(tmp), file);
        }
        rewind(file);
        return file;
    }

    // KV pairs
    for (int i = 0; i < int(kv_types.size()); ++i) {
        const enum gguf_type type     = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].first);
        const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].second);

        const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i));

        if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) {
            const uint64_t n = -1;
            helper_write(&n, sizeof(n), file);
        } else {
            const uint64_t n = key.length();
            helper_write(&n, sizeof(n), file);
        }
        helper_write(key.data(), key.length(), file);

        {
            const int32_t type32 = int32_t(type);
            helper_write(&type32, sizeof(type32), file);
        }

        uint32_t data[16];
        for (int j = 0; j < 16; ++j) {
            data[j] = rng();
            if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) {
                data[j] |= 0x01010101; // avoid random null-termination of string
            }
        }

        if (type == GGUF_TYPE_STRING) {
            const uint64_t n = rng() % sizeof(data);
            helper_write(&n, sizeof(n), file);
            helper_write(data, n, file);
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            {
                const int32_t type32 = int32_t(type_arr);
                helper_write(&type32, sizeof(type32), file);
            }

            if (type_arr == GGUF_TYPE_STRING) {
                const uint64_t nstr = rng() % (16 + 1);
                helper_write(&nstr, sizeof(nstr), file);
                for (uint64_t istr = 0; istr < nstr; ++istr) {
                    const uint64_t n = rng() % (sizeof(uint32_t) + 1);
                    helper_write(&n, sizeof(n), file);
                    helper_write(&data[istr], n, file);
                }
                continue;
            }

            const size_t type_size = gguf_type_size(type_arr);
            const uint64_t n = (rng() % sizeof(data)) / type_size;
            helper_write(&n, sizeof(n), file);
            helper_write(data, n*type_size, file);
            continue;
        }

        size_t type_size = hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type);
        if (hft == HANDCRAFTED_KV_BAD_VALUE_SIZE) {
            type_size += rng() % 3;
        }
        helper_write(data, type_size, file);
    }

    if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
        const std::string key = "general.alignment";
        {
            const uint64_t n = key.length();
            helper_write(&n, sizeof(n), file);
        }
        helper_write(key.data(), key.length(), file);

        const int32_t type = gguf_type(GGUF_TYPE_UINT32);
        helper_write(&type, sizeof(type), file);

        const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT + 1;
        helper_write(&alignment, sizeof(alignment), file);
    }

    if (hft < offset_has_tensors) {
        for (int i = 0; i < extra_bytes; ++i) {
            const char tmp = 0;
            helper_write(&tmp, sizeof(tmp), file);
        }
        rewind(file);
        return file;
    }

    uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;
    if (hft == HANDCRAFTED_TENSORS_BAD_ALIGNMENT || hft == HANDCRAFTED_DATA_BAD_ALIGNMENT) {
        alignment -= 1;
    } else if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
        alignment += 1;
    }

    // tensor metadata
    uint64_t offset = 0;
    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type                          type  = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        std::string name = "my_tensor";
        if (hft != HANDCRAFTED_TENSORS_DUPLICATE_NAME) {
            name += "_" + std::to_string(i);
        }
        if (hft == HANDCRAFTED_TENSORS_BAD_NAME_SIZE) {
            name += "_with_a_very_long_name_which_is_longer_than_what_is_allowed_for_ggml_tensors";
            GGML_ASSERT(name.length() >= GGML_MAX_NAME);
        }
        {
            const uint64_t n = name.length();
            helper_write(&n, sizeof(n), file);
        }
        helper_write(name.data(), name.length(), file);

        uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1;
        for (int j = GGML_MAX_DIMS-1; j >= 1; --j) {
            if (shape[j] != 1) {
                n_dims = j + 1;
                break;
            }
        }
        if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) {
            const uint32_t n_dims_bad = GGML_MAX_DIMS + 1;
            helper_write(&n_dims_bad, sizeof(n_dims_bad), file);
        } else {
            helper_write(&n_dims, sizeof(n_dims), file);
        }

        if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) {
            for (uint32_t j = 0; j < n_dims; ++j) {
                const int64_t bad_dim = -1;
                helper_write(&bad_dim, sizeof(bad_dim), file);
            }
        } else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG) {
            for (uint32_t j = 0; j < n_dims; ++j) {
                const int64_t big_dim = 4*int64_t(INT32_MAX);
                helper_write(&big_dim, sizeof(big_dim), file);
            }
        } else {
            helper_write(shape.data(), n_dims*sizeof(int64_t), file);
        }

        {
            const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? -1 : int32_t(type);
            helper_write(&type32, sizeof(type32), file);
        }

        if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) {
            const uint64_t bad_offset = -1;
            helper_write(&bad_offset, sizeof(bad_offset), file);
        } else {
            helper_write(&offset, sizeof(offset), file);
        }

        int64_t ne = shape[0];
        for (uint32_t j = 1; j < n_dims; ++j) {
            ne *= shape[j];
        }
        offset += GGML_PAD(ggml_row_size(type, ne), alignment);
    }

    // pad to alignment before the tensor data
    const uint32_t alignment_overshoot = ftell(file) % alignment;
    if (alignment_overshoot != 0) {
        for (size_t i = alignment_overshoot; i < alignment; ++i) {
            const char pad = 0;
            helper_write(&pad, sizeof(pad), file);
        }
    }

    // tensor data
    if (hft >= offset_has_data) {
        rng.seed(seed + 1);
        uint64_t nbytes = offset;
        if (hft == HANDCRAFTED_DATA_NOT_ENOUGH_DATA) {
            nbytes -= 1;
        }
        for (uint64_t i = 0; i < nbytes; ++i) {
            const uint8_t byte = i % 256; // deterministic pattern, checked in handcrafted_check_tensor_data
            helper_write(&byte, sizeof(byte), file);
        }
    }

    for (int i = 0; i < extra_bytes; ++i) {
        const char tmp = 0;
        helper_write(&tmp, sizeof(tmp), file);
    }
    rewind(file);
    return file;
}
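
// The check_* helpers below re-seed an RNG with the seed used to write the file and
// replay the same generation sequence, comparing the expected values against what
// the gguf_context actually parsed.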
static bool handcrafted_check_header(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_kv, const bool has_tensors, const bool alignment_defined) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs;
    if (has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }
    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
    if (has_kv) {
        kv_types = get_kv_types(rng);
    }

    bool ok = true;

    if (gguf_get_version(gguf_ctx) != GGUF_VERSION) {
        ok = false;
    }
    if (gguf_get_n_tensors(gguf_ctx) != int(tensor_configs.size())) {
        ok = false;
    }
    if (gguf_get_n_kv(gguf_ctx) != int(alignment_defined ? kv_types.size() + 1 : kv_types.size())) {
        ok = false;
    }

    return ok;
}
static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_tensors, const bool alignment_defined) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs;
    if (has_tensors) {
        tensor_configs = get_tensor_configs(rng);
    }

    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types = get_kv_types(rng);

    bool ok = true;

    for (int i = 0; i < int(kv_types.size()); ++i) {
        const enum gguf_type type     = gguf_type(kv_types[i].first);
        const enum gguf_type type_arr = gguf_type(kv_types[i].second);

        const std::string key = "my_key_" + std::to_string(i);

        uint32_t data[16];
        for (int j = 0; j < 16; ++j) {
            data[j] = rng();
            if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) {
                data[j] |= 0x01010101; // avoid random null-termination of string
            }
        }
        const char * data8 = reinterpret_cast<const char *>(data);
        const int id = gguf_find_key(gguf_ctx, key.c_str());

        if (type == GGUF_TYPE_STRING) {
            const char * str = gguf_get_val_str(gguf_ctx, id);
            const uint64_t n = strlen(str);
            const uint64_t n_expected = rng() % sizeof(data);
            if (n != n_expected) {
                ok = false;
                continue;
            }
            if (!std::equal(str, str + n, data8)) {
                ok = false;
            }
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            const size_t type_size = gguf_type_size(type_arr);
            const uint64_t arr_n = gguf_get_arr_n(gguf_ctx, id);

            if (type_arr == GGUF_TYPE_STRING) {
                const uint64_t nstr_expected = rng() % (16 + 1);
                if (arr_n != nstr_expected) {
                    ok = false;
                    continue;
                }
                for (uint64_t istr = 0; istr < nstr_expected; ++istr) {
                    const char * str = gguf_get_arr_str(gguf_ctx, id, istr);
                    const uint64_t n = strlen(str);
                    const uint64_t n_expected = rng() % (sizeof(uint32_t) + 1);

                    if (n != n_expected) {
                        ok = false;
                        continue;
                    }
                    const char * str_expected = reinterpret_cast<const char *>(&data[istr]);
                    if (strncmp(str, str_expected, n) != 0) {
                        ok = false;
                        continue;
                    }
                }
                continue;
            }

            const uint64_t arr_n_expected = (rng() % sizeof(data)) / type_size;
            if (arr_n != arr_n_expected) {
                ok = false;
                continue;
            }
            const char * data_gguf = reinterpret_cast<const char *>(gguf_get_arr_data(gguf_ctx, id));
            if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) {
                ok = false;
            }
            continue;
        }

        const char * data_gguf = reinterpret_cast<const char *>(gguf_get_val_data(gguf_ctx, id));
        if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) {
            ok = false;
        }
    }

    const uint32_t expected_alignment = alignment_defined ? GGUF_DEFAULT_ALIGNMENT + 1 : GGUF_DEFAULT_ALIGNMENT;
    if (gguf_get_alignment(gguf_ctx) != expected_alignment) {
        ok = false;
    }

    return ok;
}
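
// Verify tensor names, types, and offsets; the expected offsets are recomputed from
// the tensor configurations, honoring a custom general.alignment if one is set.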
static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsigned int seed) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng);

    // Call get_kv_types to get the same RNG state:
    get_kv_types(rng);

    bool ok = true;

    const int id_alignment = gguf_find_key(gguf_ctx, "general.alignment");
    const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT;

    uint64_t expected_offset = 0;
    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type                          type  = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        const std::string name = "my_tensor_" + std::to_string(i);
        const int id = gguf_find_tensor(gguf_ctx, name.c_str());

        if (id >= 0) {
            if (std::string(gguf_get_tensor_name(gguf_ctx, id)) != name) {
                ok = false;
            }
            if (gguf_get_tensor_type(gguf_ctx, id) != type) {
                ok = false;
            }
        } else {
            ok = false;
            continue;
        }

        const size_t offset = gguf_get_tensor_offset(gguf_ctx, id);
        if (offset != expected_offset) {
            ok = false;
        }

        int64_t ne = shape[0];
        for (size_t j = 1; j < GGML_MAX_DIMS; ++j) {
            ne *= shape[j];
        }
        expected_offset += GGML_PAD(ggml_row_size(type, ne), alignment);
    }

    return ok;
}
static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const unsigned int seed, FILE * file) {
    if (!gguf_ctx) {
        return false;
    }

    std::mt19937 rng(seed);

    std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng);

    bool ok = true;

    for (int i = 0; i < int(tensor_configs.size()); ++i) {
        const ggml_type                          type  = tensor_configs[i].first;
        const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;

        int64_t ne = shape[0];
        for (size_t j = 1; j < GGML_MAX_DIMS; ++j) {
            ne *= shape[j];
        }
        const size_t size = ggml_row_size(type, ne);

        const std::string name = "my_tensor_" + std::to_string(i);
        const size_t offset = gguf_get_tensor_offset(gguf_ctx, gguf_find_tensor(gguf_ctx, name.c_str()));

        std::vector<uint8_t> data(size);
        GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0);
        GGML_ASSERT(fread(data.data(), 1, size, file) == size);

        for (size_t j = 0; j < size; ++j) {
            // the writer fills the data section with the byte pattern (position % 256):
            const uint8_t expected_byte = (j + offset) % 256;
            if (data[j] != expected_byte) {
                ok = false;
            }
        }
    }

    return ok;
}
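
// For each handcrafted file: parse it with gguf_init_from_file_impl, check that a
// context is or is not returned as expected, then validate the header, KV pairs,
// tensor metadata, and tensor data where applicable. Blocks guarded with
// `false &&` are tests for known bugs (see the FIXME comments).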
static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
    int npass = 0;
    int ntest = 0;

    const std::vector<handcrafted_file_type> hfts = {
        HANDCRAFTED_HEADER_BAD_MAGIC,
        HANDCRAFTED_HEADER_BAD_VERSION_1,
        // HANDCRAFTED_HEADER_BAD_VERSION_FUTURE, // FIXME
        HANDCRAFTED_HEADER_BAD_N_KV,
        HANDCRAFTED_HEADER_BAD_N_TENSORS,
        HANDCRAFTED_HEADER_EMPTY,

        HANDCRAFTED_KV_BAD_KEY_SIZE,
        HANDCRAFTED_KV_BAD_TYPE,
        // HANDCRAFTED_KV_BAD_VALUE_SIZE, // FIXME sanitizer limit
        // HANDCRAFTED_KV_DUPLICATE_KEY, // FIXME
        HANDCRAFTED_KV_SUCCESS,

        HANDCRAFTED_TENSORS_BAD_NAME_SIZE,
        HANDCRAFTED_TENSORS_BAD_N_DIMS,
        HANDCRAFTED_TENSORS_BAD_SHAPE,
        HANDCRAFTED_TENSORS_NE_TOO_BIG,
        HANDCRAFTED_TENSORS_BAD_TYPE,
        // HANDCRAFTED_TENSORS_BAD_OFFSET, // FIXME
        HANDCRAFTED_TENSORS_DUPLICATE_NAME,
        // HANDCRAFTED_TENSORS_BAD_ALIGNMENT, // FIXME
        HANDCRAFTED_TENSORS_SUCCESS,
        HANDCRAFTED_TENSORS_CUSTOM_ALIGN,

        HANDCRAFTED_DATA_NOT_ENOUGH_DATA,
        // HANDCRAFTED_DATA_BAD_ALIGNMENT, // FIXME
        HANDCRAFTED_DATA_SUCCESS,
        HANDCRAFTED_DATA_CUSTOM_ALIGN,
    };

    for (enum handcrafted_file_type hft : hfts) {
        printf("%s: handcrafted_file_type=%s\n", __func__, handcrafted_file_type_name(hft).c_str());
        FILE * file = get_handcrafted_file(seed, hft);

#ifdef _WIN32
        if (!file) {
            printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__);
            printf("%s: skipping tests\n", __func__);
            continue;
        }
#else
        GGML_ASSERT(file);
#endif // _WIN32

        struct ggml_context * ctx = nullptr;
        struct gguf_init_params gguf_params = {
            /*no_alloc =*/ false,
            /*ctx      =*/ hft >= offset_has_data ? &ctx : nullptr,
        };
        struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params);

        if (expect_context_not_null(hft)) {
            printf("%s: - context_not_null: ", __func__);
        } else {
            printf("%s: - context_null: ", __func__);
        }
        if (bool(gguf_ctx) == expect_context_not_null(hft)) {
            printf("\033[1;32mOK\033[0m\n");
            npass++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }
        ntest++;

        if (false && hft >= offset_has_data && !expect_context_not_null(hft)) { // FIXME
            printf("%s: - no_dangling_ggml_context_pointer: ", __func__);
            if (ctx) {
                printf("\033[1;31mFAIL\033[0m\n");
            } else {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            }
            ntest++;
        }

        if (false && expect_context_not_null(hft)) { // FIXME
            FILE * file_eb = get_handcrafted_file(seed, hft, /*extra_bytes =*/ 1);
            struct gguf_context * gguf_ctx_eb = gguf_init_from_file_impl(file_eb, gguf_params);

            printf("%s: - context_null_with_extra_bytes: ", __func__);
            if (gguf_ctx_eb) {
                printf("\033[1;31mFAIL\033[0m\n");
            } else {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            }
            ntest++;

            gguf_free(gguf_ctx_eb);
            fclose(file_eb);
        }

        const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN;

        if (expect_context_not_null(hft)) {
            printf("%s: - check_header: ", __func__);
            if (handcrafted_check_header(gguf_ctx, seed, hft >= offset_has_kv, hft >= offset_has_tensors, alignment_defined)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_kv) {
            printf("%s: - check_kv: ", __func__);
            if (handcrafted_check_kv(gguf_ctx, seed, hft >= offset_has_tensors, alignment_defined)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_tensors) {
            printf("%s: - check_tensors: ", __func__);
            if (handcrafted_check_tensors(gguf_ctx, seed)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (expect_context_not_null(hft) && hft >= offset_has_data) {
            printf("%s: - check_tensor_data: ", __func__);
            if (handcrafted_check_tensor_data(gguf_ctx, seed, file)) {
                printf("\033[1;32mOK\033[0m\n");
                npass++;
            } else {
                printf("\033[1;31mFAIL\033[0m\n");
            }
            ntest++;
        }

        if (gguf_ctx) {
            ggml_free(ctx);
            gguf_free(gguf_ctx);
        }
        fclose(file);
        printf("\n");
    }

    return std::make_pair(npass, ntest);
}
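
// Build a random gguf_context together with a matching ggml_context: up to 256
// random KV pairs (string and array values are currently skipped, see the FIXME
// below) and up to 256 random tensors allocated on the given backend and filled
// with random bytes.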
struct random_gguf_context_result {
    struct gguf_context * gguf_ctx;
    struct ggml_context * ctx;
    ggml_backend_buffer_t buffer;
};

static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t backend, const unsigned int seed) {
    std::mt19937 rng(seed);

    struct gguf_context * gguf_ctx = gguf_init_empty();

    for (int i = 0; i < 256; ++i) {
        const std::string key = "my_key_" + std::to_string(rng() % 1024);
        const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);

        if (type == GGUF_TYPE_STRING || type == GGUF_TYPE_ARRAY) {
            continue; // FIXME memory leak
        }

        switch (type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (gguf_ctx, key.c_str(), rng() % (1 << 7));              break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (gguf_ctx, key.c_str(), rng() % (1 << 7) - (1 << 6));   break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (gguf_ctx, key.c_str(), rng() % (1 << 15));             break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (gguf_ctx, key.c_str(), rng() % (1 << 15) - (1 << 14)); break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (gguf_ctx, key.c_str(), rng());                         break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (gguf_ctx, key.c_str(), rng() - (1 << 30));             break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (gguf_ctx, key.c_str(), rng() % 1024 - 512);            break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(gguf_ctx, key.c_str(), rng() % 2 == 0);                break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (gguf_ctx, key.c_str(), std::to_string(rng()).c_str()); break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (gguf_ctx, key.c_str(), rng());                         break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (gguf_ctx, key.c_str(), rng() - (1 << 30));             break;
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (gguf_ctx, key.c_str(), rng() % 1024 - 512);            break;
            case GGUF_TYPE_ARRAY: {
                const enum gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT);
                const uint64_t ne = rng() % 1024;

                switch (type_arr) {
                    case GGUF_TYPE_UINT8:
                    case GGUF_TYPE_INT8:
                    case GGUF_TYPE_UINT16:
                    case GGUF_TYPE_INT16:
                    case GGUF_TYPE_UINT32:
                    case GGUF_TYPE_INT32:
                    case GGUF_TYPE_FLOAT32:
                    case GGUF_TYPE_BOOL:
                    case GGUF_TYPE_UINT64:
                    case GGUF_TYPE_INT64:
                    case GGUF_TYPE_FLOAT64: {
                        const size_t nbytes = ne*gguf_type_size(type_arr);
                        std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
                        for (size_t j = 0; j < random_data.size(); ++j) {
                            random_data[j] = rng();
                        }
                        gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne);
                    } break;
                    case GGUF_TYPE_STRING: {
                        std::vector<std::string>  data_cpp(ne);
                        std::vector<const char *> data_c(ne);
                        for (size_t j = 0; j < data_cpp.size(); ++j) {
                            data_cpp[j] = std::to_string(rng());
                            data_c[j]   = data_cpp[j].c_str();
                        }
                        gguf_set_arr_str(gguf_ctx, key.c_str(), data_c.data(), ne);
                    } break;
                    case GGUF_TYPE_ARRAY: {
                        break; // not supported
                    }
                    case GGUF_TYPE_COUNT:
                    default: {
                        GGML_ABORT("fatal error");
                    } break;
                }
            } break;
            case GGUF_TYPE_COUNT:
            default: {
                GGML_ABORT("fatal error");
            } break;
        }
    }

    struct ggml_init_params ggml_params = {
        /*.mem_size   =*/ 256*ggml_tensor_overhead(),
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(ggml_params);

    for (int i = 0; i < 256; ++i) {
        const std::string name = "my_tensor_" + std::to_string(i);
        const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT);
        const size_t type_size = ggml_type_size(type);

        if (type_size == 0) {
            continue;
        }

        const int n_dims = 1 + rng() % GGML_MAX_DIMS;
        int64_t ne[GGML_MAX_DIMS];
        ne[0] = (1 + rng() % 10) * ggml_blck_size(type);
        for (int j = 1; j < n_dims; ++j) {
            ne[j] = 1 + rng() % 10;
        }

        struct ggml_tensor * tensor = ggml_new_tensor(ctx, type, n_dims, ne);
        ggml_set_name(tensor, name.c_str());
    }

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
        const size_t nbytes = ggml_nbytes(t);
        std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
        for (size_t j = 0; j < random_data.size(); ++j) {
            random_data[j] = rng();
        }
        ggml_backend_tensor_set(t, random_data.data(), 0, nbytes);

        gguf_add_tensor(gguf_ctx, t);
    }

    return {gguf_ctx, ctx, buf};
}
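
// Check that every KV pair in ctx is also present in other with the same type and
// value; calling this in both directions establishes that two contexts hold
// identical KV data.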
static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other) {
    bool ok = true;

    const int n_kv = gguf_get_n_kv(ctx);
    for (int id = 0; id < n_kv; ++id) {
        const char * name = gguf_get_key(ctx, id);

        const int idx_other = gguf_find_key(other, name);
        if (idx_other < 0) {
            ok = false;
            continue;
        }

        const gguf_type type = gguf_get_kv_type(ctx, id);
        if (type != gguf_get_kv_type(other, idx_other)) {
            ok = false;
            continue;
        }

        if (type == GGUF_TYPE_ARRAY) {
            const int arr_n = gguf_get_arr_n(ctx, id);
            if (arr_n != gguf_get_arr_n(other, idx_other)) {
                ok = false;
                continue;
            }

            const gguf_type type_arr = gguf_get_arr_type(ctx, id);
            if (type_arr != gguf_get_arr_type(other, idx_other)) {
                ok = false;
                continue;
            }

            if (type_arr == GGUF_TYPE_STRING) {
                for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
                    const std::string str       = gguf_get_arr_str(ctx,   id,        arr_i);
                    const std::string str_other = gguf_get_arr_str(other, idx_other, arr_i);
                    if (str != str_other) {
                        ok = false;
                    }
                }
                continue;
            }

            const char * data       = reinterpret_cast<const char *>(gguf_get_arr_data(ctx,   id));
            const char * data_other = reinterpret_cast<const char *>(gguf_get_arr_data(other, idx_other));
            if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) {
                ok = false;
            }
            continue;
        }

        if (type == GGUF_TYPE_STRING) {
            const std::string str       = gguf_get_val_str(ctx,   id);
            const std::string str_other = gguf_get_val_str(other, idx_other);
            if (str != str_other) {
                ok = false;
            }
            continue;
        }

        const char * data       = reinterpret_cast<const char *>(gguf_get_val_data(ctx,   id));
        const char * data_other = reinterpret_cast<const char *>(gguf_get_val_data(other, idx_other));
        if (!std::equal(data, data + gguf_type_size(type), data_other)) {
            ok = false;
        }
    }

    return ok;
}
static bool all_tensors_in_other(const gguf_context * ctx, const gguf_context * other) {
    bool ok = true;

    const int n_tensors = gguf_get_n_tensors(ctx);
    for (int id = 0; id < n_tensors; ++id) {
        const std::string name = gguf_get_tensor_name(ctx, id);

        const int idx_other = gguf_find_tensor(other, name.c_str());
        if (id != idx_other) {
            ok = false;
            if (idx_other < 0) {
                continue;
            }
        }

        const ggml_type type = gguf_get_tensor_type(ctx, id);
        if (type != gguf_get_tensor_type(other, id)) {
            ok = false;
        }

        const size_t offset = gguf_get_tensor_offset(ctx, id);
        if (offset != gguf_get_tensor_offset(other, id)) {
            ok = false;
        }
    }

    return ok;
}
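
// Walk the tensors of both contexts in lockstep and compare their contents byte by
// byte; the original tensors may live in a backend buffer, so they are read back
// via ggml_backend_tensor_get.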
static bool same_tensor_data(const struct ggml_context * orig, const struct ggml_context * read) {
    bool ok = true;

    struct ggml_tensor * t_orig = ggml_get_first_tensor(orig);
    struct ggml_tensor * t_read = ggml_get_first_tensor(read);

    while (t_orig) {
        if (!t_read) {
            ok = false;
            break;
        }

        const size_t nbytes = ggml_nbytes(t_orig);
        if (ggml_nbytes(t_read) != nbytes) {
            ok = false;
            break;
        }

        std::vector<char> data_orig(nbytes);
        ggml_backend_tensor_get(t_orig, data_orig.data(), 0, nbytes);
        if (!std::equal(data_orig.data(), data_orig.data() + nbytes, reinterpret_cast<const char *>(t_read->data))) {
            ok = false;
        }

        t_orig = ggml_get_next_tensor(orig, t_orig);
        t_read = ggml_get_next_tensor(read, t_read); // iterate the read context, not the original one
    }
    if (t_read) {
        ok = false; // the read context has more tensors than the original
    }

    return ok;
}
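
// Round trip: generate a random GGUF context, serialize it to a buffer, write the
// buffer to a tmpfile(), parse that file back, and compare version, KV pairs,
// tensor metadata, and (unless only_meta) tensor data.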
static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
    FILE * file = tmpfile();

#ifdef _WIN32
    if (!file) {
        printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__);
        printf("%s: skipping tests\n", __func__);
        return std::make_pair(0, 0);
    }
#else
    GGML_ASSERT(file);
#endif // _WIN32

    if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
        fclose(file);
        return std::make_pair(0, 0); // FIXME
    }

    ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
    printf("%s: device=%s, backend=%s, only_meta=%s\n",
        __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no");

    int npass = 0;
    int ntest = 0;

    struct gguf_context * gguf_ctx_0;
    struct ggml_context * ctx_0;
    ggml_backend_buffer_t bbuf;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed);
        gguf_ctx_0 = result.gguf_ctx;
        ctx_0      = result.ctx;
        bbuf       = result.buffer;
    }

    struct gguf_buf gbuf = gguf_buf_init(16 * 1024);
    gguf_write_to_buf(gguf_ctx_0, &gbuf, only_meta);
    helper_write(gbuf.data, gbuf.offset, file);
    rewind(file);

    struct ggml_context * ctx_1 = nullptr;
    struct gguf_init_params gguf_params = {
        /*no_alloc =*/ false,
        /*ctx      =*/ only_meta ? nullptr : &ctx_1,
    };
    struct gguf_context * gguf_ctx_1 = gguf_init_from_file_impl(file, gguf_params);

    printf("%s: same_version: ", __func__);
    if (gguf_get_version(gguf_ctx_0) == gguf_get_version(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: same_n_kv: ", __func__);
    if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: same_n_tensors: ", __func__);
    if (gguf_get_n_tensors(gguf_ctx_0) == gguf_get_n_tensors(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_orig_kv_in_read: ", __func__);
    if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_read_kv_in_orig: ", __func__);
    if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_orig_tensors_in_read: ", __func__);
    if (all_tensors_in_other(gguf_ctx_0, gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_read_tensors_in_orig: ", __func__);
    if (all_tensors_in_other(gguf_ctx_1, gguf_ctx_0)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    if (!only_meta) {
        printf("%s: same_tensor_data: ", __func__);
        if (same_tensor_data(ctx_0, ctx_1)) {
            printf("\033[1;32mOK\033[0m\n");
            npass++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }
        ntest++;
    }

    ggml_backend_buffer_free(bbuf);
    ggml_free(ctx_0);
    ggml_free(ctx_1);
    gguf_free(gguf_ctx_0);
    gguf_free(gguf_ctx_1);
    gguf_buf_free(gbuf);
    ggml_backend_free(backend);
    GGML_ASSERT(fclose(file) == 0);

    printf("\n");
    return std::make_pair(npass, ntest);
}
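
// Exercise gguf_set_kv by copying the KV pairs of one random context into both an
// empty context and a second random context, then checking the results.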
static std::pair<int, int> test_gguf_set_kv(ggml_backend_dev_t dev, const unsigned int seed) {
    ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
    printf("%s: device=%s, backend=%s\n", __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend));

    int npass = 0;
    int ntest = 0;

    struct gguf_context * gguf_ctx_0;
    struct ggml_context * ctx_0;
    ggml_backend_buffer_t bbuf_0;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed);
        gguf_ctx_0 = result.gguf_ctx;
        ctx_0      = result.ctx;
        bbuf_0     = result.buffer;
    }

    struct gguf_context * gguf_ctx_1;
    struct ggml_context * ctx_1;
    ggml_backend_buffer_t bbuf_1;
    {
        struct random_gguf_context_result result = get_random_gguf_context(backend, seed + 1);
        gguf_ctx_1 = result.gguf_ctx;
        ctx_1      = result.ctx;
        bbuf_1     = result.buffer;
    }

    struct gguf_context * gguf_ctx_2 = gguf_init_empty();

    gguf_set_kv(gguf_ctx_1, gguf_ctx_0);
    gguf_set_kv(gguf_ctx_2, gguf_ctx_0);

    printf("%s: same_n_kv: ", __func__);
    if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_2)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_kv_0_in_1: ", __func__);
    if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_kv_0_in_2: ", __func__);
    if (all_kv_in_other(gguf_ctx_0, gguf_ctx_2)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    gguf_set_kv(gguf_ctx_0, gguf_ctx_1);

    printf("%s: same_n_kv_after_double_copy: ", __func__);
    if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    printf("%s: all_kv_1_in_0_after_double_copy: ", __func__);
    if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) {
        printf("\033[1;32mOK\033[0m\n");
        npass++;
    } else {
        printf("\033[1;31mFAIL\033[0m\n");
    }
    ntest++;

    ggml_backend_buffer_free(bbuf_0);
    ggml_backend_buffer_free(bbuf_1);
    ggml_free(ctx_0);
    ggml_free(ctx_1);
    gguf_free(gguf_ctx_0);
    gguf_free(gguf_ctx_1);
    gguf_free(gguf_ctx_2);
    ggml_backend_free(backend);

    printf("\n");
    return std::make_pair(npass, ntest);
}
static void print_usage() {
    printf("usage: test-gguf [seed]\n");
    printf("  if no seed is specified, a random seed is used\n");
}
int main(int argc, char ** argv) {
    if (argc > 2) {
        print_usage();
        return 1;
    }

    std::random_device rd;
    const unsigned int seed = argc < 2 ? rd() : std::stoi(argv[1]);

    // Initialize ggml backends early so the prints aren't interleaved with the test results:
    ggml_backend_dev_count();
    fprintf(stderr, "\n");

    int npass = 0;
    int ntest = 0;
    {
        std::pair<int, int> result = test_handcrafted_file(seed);
        npass += result.first;
        ntest += result.second;
    }
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);

        for (bool only_meta : {true, false}) {
            std::pair<int, int> result = test_roundtrip(dev, seed, only_meta);
            npass += result.first;
            ntest += result.second;
        }
        {
            std::pair<int, int> result = test_gguf_set_kv(dev, seed);
            npass += result.first;
            ntest += result.second;
        }
    }

    printf("%d/%d tests passed\n", npass, ntest);
    if (npass != ntest) {
        printf("\033[1;31mFAIL\033[0m\n");
        return 1;
    }
    printf("\033[1;32mOK\033[0m\n");
    return 0;
}