// test-backend-ops.cpp

#include <ggml.h>
#include <ggml-alloc.h>
#include <ggml-backend.h>
#include <ggml-backend-impl.h>

#include <algorithm>
#include <array>
#include <cfloat>
#include <cstring>
#include <functional>
#include <memory>
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thread>
#include <vector>
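
// Fill a tensor with uniformly distributed random floats in [min, max].
// The work is split across hardware_concurrency() threads, each with its own
// RNG; quantized and F16/BF16 tensors are converted from the float data below.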
static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
    // static RNG initialization (revisit if n_threads stops being constant)
    static const size_t n_threads = std::thread::hardware_concurrency();
    static std::vector<std::default_random_engine> generators = []() {
        std::random_device rd;
        std::vector<std::default_random_engine> vec;
        vec.reserve(n_threads);
        //for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
        for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
        return vec;
    }();

    size_t size = ggml_nelements(tensor);
    std::vector<float> data(size);

    auto init_thread = [&](size_t ith, size_t start, size_t end) {
        std::uniform_real_distribution<float> distribution(min, max);
        for (size_t i = start; i < end; i++) {
            data[i] = distribution(generators[ith]);
        }
    };

    std::vector<std::thread> threads;
    threads.reserve(n_threads);
    for (size_t i = 0; i < n_threads; i++) {
        size_t start = i*size/n_threads;
        size_t end = (i+1)*size/n_threads;
        threads.emplace_back(init_thread, i, start, end);
    }
    for (auto & t : threads) {
        t.join();
    }

#if 0
    const char * val_str = getenv("GGML_TEST_EPS");
    float val = 1e-9f;
    if (val_str != nullptr) {
        val = std::stof(val_str);
        printf("GGML_TEST_EPS=%e\n", val);
    }
    // test quantization with very small values that may result in nan scales due to division by zero
    if (ggml_is_quantized(tensor->type)) {
        for (int i = 0; i < 256; i++) {
            data[i] = val;
        }
    }
#endif

    if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
        ggml_backend_tensor_set(tensor, data.data(), 0, size * sizeof(float));
    } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
        GGML_ASSERT(size % ggml_blck_size(tensor->type) == 0);
        std::vector<uint8_t> dataq(ggml_row_size(tensor->type, size));
        std::vector<float> imatrix(tensor->ne[0], 1.0f); // dummy importance matrix
        const float * im = imatrix.data();
        if (!ggml_quantize_requires_imatrix(tensor->type)) {
            // when the imatrix is optional, we want to test both quantization with and without imatrix
            // use one of the random numbers to decide
            if (data[0] > 0.5f*(min + max)) {
                im = nullptr;
            }
        }
        ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size/tensor->ne[0], tensor->ne[0], im);
        GGML_ASSERT(ggml_validate_row_data(tensor->type, dataq.data(), dataq.size()));
        ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
    } else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
        // This is going to create some weird integers though.
        ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
    } else {
        GGML_ASSERT(false);
    }
}
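
// Read a tensor back from the backend and flatten it into a vector of floats
// so that outputs of any supported type (F32/F16/BF16, integers, quantized
// blocks) can be compared element-wise regardless of backend or layout.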
static std::vector<float> tensor_to_float(const ggml_tensor * t) {
    std::vector<float> tv;
    tv.reserve(ggml_nelements(t));

    std::vector<uint8_t> buf(ggml_nbytes(t));
    ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));

    ggml_type_traits_t tt = ggml_internal_get_type_traits(t->type);
    size_t bs = ggml_blck_size(t->type);
    std::vector<float> vq(ggml_blck_size(t->type));
    bool quantized = ggml_is_quantized(t->type);

    // access elements by index to avoid gaps in views
    for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
        for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
            for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
                for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
                    size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
                    if (t->type == GGML_TYPE_F16) {
                        tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_BF16) {
                        tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_F32) {
                        tv.push_back(*(float *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I32) {
                        tv.push_back((float)*(int32_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I16) {
                        tv.push_back((float)*(int16_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I8) {
                        tv.push_back((float)*(int8_t *) &buf[i]);
                    } else if (quantized) {
                        tt.to_float(&buf[i], vq.data(), bs);
                        tv.insert(tv.end(), vq.begin(), vq.end());
                    } else {
                        GGML_ASSERT(false);
                    }
                }
            }
        }
    }

    return tv;
}

/*
static double cosine_similarity(const float * v1, const float * v2, size_t n) {
    double dot = 0.0;
    double mag1 = 0.0;
    double mag2 = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return -1.0f;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        dot += v1[i]*v2[i];
        mag1 += v1[i]*v1[i];
        mag2 += v2[i]*v2[i];
    }

    return dot/sqrt(mag1*mag2);
}

static float distance(const float * v1, const float * v2, size_t n) {
    double d = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return INFINITY;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        d += (v1[i] - v2[i])*(v1[i] - v2[i]);
    }

    return sqrt(d);
}

static float vec_len(const float * v, size_t n) {
    double d = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v[i])) {
            return INFINITY;
        }
        if (std::isinf(v[i])) {
            continue;
        }
        d += v[i]*v[i];
    }

    return sqrt(d);
}
*/

// normalized mean squared error = mse(a, b) / mse(a, 0)
static double nmse(const float * a, const float * b, size_t n) {
    double mse_a_b = 0.0;
    double mse_a_0 = 0.0;

    for (size_t i = 0; i < n; i++) {
        float a_i = a[i];
        float b_i = b[i];

        mse_a_b += (a_i - b_i) * (a_i - b_i);
        mse_a_0 += a_i * a_i;
    }

    return mse_a_b / mse_a_0;
}
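
// Note: dividing by mse(a, 0) makes the error threshold scale-invariant:
//   nmse(a, b) = sum_i (a_i - b_i)^2 / sum_i a_i^2
// so a bound like max_nmse_err() = 1e-7 tolerates the same relative error
// whether the outputs are on the order of 1e-3 or 1e+3.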

// utils for printing the variables of the test cases
#define VAR_TO_STR(x) (#x "=" + var_to_str(x))

template<typename T>
static std::string var_to_str(const T & x) {
    return std::to_string(x);
}

template<typename T, size_t N>
static std::string var_to_str(const T (&x)[N]) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

template<typename T, size_t N>
static std::string var_to_str(const std::array<T, N> & x) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

//static std::string var_to_str(ggml_unary_op unary_op) {
//    return ggml_unary_op_name(unary_op);
//}

static std::string var_to_str(ggml_type type) {
    return ggml_type_name(type);
}

static std::string var_to_str(ggml_op_pool pool) {
    switch (pool) {
        case GGML_OP_POOL_AVG: return "avg";
        case GGML_OP_POOL_MAX: return "max";
        default: return std::to_string(pool);
    }
}

#define VARS_TO_STR1(a) VAR_TO_STR(a)
#define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
#define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
#define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
#define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
#define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
#define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
#define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
#define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k) VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
#define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
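
// Bit-pattern infinity check (exponent all ones, zero mantissa). Presumably
// std::isinf is not reliable under the SYCL compiler's fast-math settings;
// the reason is not documented here, so treat this as an assumption.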
#ifdef GGML_USE_SYCL
static bool inline _isinf(float f) {
    return (*(uint32_t *)&f & 0x7fffffff) == 0x7f800000;
}
#else
static bool inline _isinf(float f) { return std::isinf(f); }
#endif

// accept FLT_MAX as infinity
static bool isinf_or_max(float f) {
    return _isinf(f) || f == FLT_MAX || f == -FLT_MAX;
}

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

enum test_mode {
    MODE_TEST,
    MODE_PERF,
};
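
// Base class for the op tests below. A subclass describes one op configuration
// via build_graph(); eval() builds the graph, runs it on two backends and
// compares the results element-wise (MODE_TEST), while eval_perf() duplicates
// the op many times on one backend to estimate memory throughput (MODE_PERF).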
struct test_case {
    virtual ~test_case() {}

    virtual std::string op_desc(ggml_tensor * t) {
        return ggml_op_desc(t);
    }

    virtual std::string vars() {
        return "";
    }

    virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;

    virtual double max_nmse_err() {
        return 1e-7;
    }

    virtual void initialize_tensors(ggml_context * ctx) {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t);
        }
    }

    virtual size_t op_size(ggml_tensor * t) {
        size_t size = ggml_nbytes(t);
        // add source tensors
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            if (t->src[i] != NULL) {
                size += ggml_nbytes(t->src[i]);
            }
        }
        return size;
    }

    ggml_cgraph * gf = nullptr;

    static const int sentinel_size = 1024;

    test_mode mode;

    std::vector<ggml_tensor *> sentinels;

    void add_sentinel(ggml_context * ctx) {
        if (mode == MODE_PERF) {
            return;
        }
        ggml_tensor * sentinel = ::ggml_new_tensor_1d(ctx, GGML_TYPE_F32, sentinel_size);
        ggml_format_name(sentinel, "sent_%zu", sentinels.size());
        sentinels.push_back(sentinel);
    }

    // hijack ggml_new_tensor to add sentinels after each tensor to check for overflows in the backend
    ggml_tensor * ggml_new_tensor(ggml_context * ctx, ggml_type type, int n_dims, const int64_t * ne) {
        ggml_tensor * t = ::ggml_new_tensor(ctx, type, n_dims, ne);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_1d(ggml_context * ctx, ggml_type type, int64_t ne0) {
        ggml_tensor * t = ::ggml_new_tensor_1d(ctx, type, ne0);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_2d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1) {
        ggml_tensor * t = ::ggml_new_tensor_2d(ctx, type, ne0, ne1);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_3d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) {
        ggml_tensor * t = ::ggml_new_tensor_3d(ctx, type, ne0, ne1, ne2);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
        ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
        add_sentinel(ctx);
        return t;
    }

    bool eval(ggml_backend_t backend1, ggml_backend_t backend2, const char * op_name) {
        mode = MODE_TEST;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        gf = ggml_new_graph(ctx);

        // pre-graph sentinel
        add_sentinel(ctx);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if the backends support the ops
        bool supported = true;
        for (ggml_backend_t backend : {backend1, backend2}) {
            for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
                if (!ggml_backend_supports_op(backend, t)) {
                    printf("not supported [%s] ", ggml_backend_name(backend));
                    supported = false;
                    break;
                }
            }
        }
        if (!supported) {
            printf("\n");
            ggml_free(ctx);
            return true;
        }

        // post-graph sentinel
        add_sentinel(ctx);

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
        if (buf == NULL) {
            printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
            ggml_free(ctx);
            return false;
        }

        // build graph
        ggml_build_forward_expand(gf, out);

        // add sentinels as graph nodes so that they are checked in the callback
        for (ggml_tensor * sentinel : sentinels) {
            gf->nodes[gf->n_nodes++] = sentinel;
        }

        // randomize tensors
        initialize_tensors(ctx);

        // compare
        struct callback_userdata {
            bool ok;
            double max_err;
            ggml_backend_t backend1;
            ggml_backend_t backend2;
        };

        callback_userdata ud {
            true,
            max_nmse_err(),
            backend1,
            backend2
        };

        auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
            callback_userdata * ud = (callback_userdata *) user_data;
            const char * bn1 = ggml_backend_name(ud->backend1);
            const char * bn2 = ggml_backend_name(ud->backend2);

            if (t1->op == GGML_OP_NONE) {
                // sentinels must be unchanged
                std::vector<uint8_t> t1_data(ggml_nbytes(t1));
                std::vector<uint8_t> t2_data(ggml_nbytes(t2));
                ggml_backend_tensor_get(t1, t1_data.data(), 0, ggml_nbytes(t1));
                ggml_backend_tensor_get(t2, t2_data.data(), 0, ggml_nbytes(t2));

                if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
                    printf("sentinel mismatch: %s ", t1->name);
                    ud->ok = false;
                    return true;
                }
            }

            std::vector<float> f1 = tensor_to_float(t1);
            std::vector<float> f2 = tensor_to_float(t2);

            for (size_t i = 0; i < f1.size(); i++) {
                // check for nans
                if (std::isnan(f1[i]) || std::isnan(f2[i])) {
                    printf("[%s] NaN at index %zu (%s=%f %s=%f) ", ggml_op_desc(t1), i, bn1, f1[i], bn2, f2[i]);
                    ud->ok = false;
                    return true;
                }
                // check for infs: both must be inf of the same sign, or both must be finite
                if (isinf_or_max(f1[i]) || isinf_or_max(f2[i])) {
                    if (isinf_or_max(f1[i]) && isinf_or_max(f2[i])) {
                        if (std::signbit(f1[i]) != std::signbit(f2[i])) {
                            printf("[%s] inf sign mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                            ud->ok = false;
                            return true;
                        }
                    } else {
                        printf("[%s] inf mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                        ud->ok = false;
                        return true;
                    }
                }
            }

            double err = nmse(f1.data(), f2.data(), f1.size());
            if (err > ud->max_err) {
                printf("[%s] NMSE = %.9f > %.9f ", ggml_op_desc(t1), err, ud->max_err);
                //for (int i = 0; i < (int) f1.size(); i++) {
                //    printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
                //}
                //printf("\n");
                //exit(1);
                ud->ok = false;
            }
            return true;

            GGML_UNUSED(index);
        };

        const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);

        if (!cmp_ok) {
            printf("compare failed ");
        }

        ggml_backend_buffer_free(buf);

        ggml_free(ctx);

        if (ud.ok && cmp_ok) {
            printf("\033[1;32mOK\033[0m\n");
            return true;
        }

        printf("\033[1;31mFAIL\033[0m\n");
        return false;
    }

    bool eval_perf(ggml_backend_t backend, const char * op_name) {
        mode = MODE_PERF;

        static const size_t graph_nodes = 8192;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        int len = printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if backends support op
        if (!ggml_backend_supports_op(backend, out)) {
            printf("not supported\n");
            ggml_free(ctx);
            return true;
        }

        // align while also leaving some margin for variations in parameters
        int align = 20;
        int last = (len + align - 1) / align * align;
        if (last - len < 5) {
            last += align;
        }
        last = std::max(last, 60);
        printf("%*s", last - len, "");

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
        if (buf == NULL) {
            printf("failed to allocate tensors\n");
            ggml_free(ctx);
            return false;
        }

        // randomize tensors
        initialize_tensors(ctx);

        // build graph
        ggml_cgraph * gf = ggml_new_graph_custom(ctx, graph_nodes, false);
        ggml_build_forward_expand(gf, out);

        // warmup run
        ggml_backend_graph_compute(backend, gf);

        // duplicate the op
        size_t target_size = ggml_backend_is_cpu(backend) ? 1ULL << 33 : 1ULL << 35; // 8 GB CPU, 32 GB GPU
        int n_runs = std::min((size_t)gf->size - gf->n_nodes, target_size / op_size(out)) + 1;
        for (int i = 1; i < n_runs; i++) {
            gf->nodes[gf->n_nodes++] = out;
        }

        // calculate memory
        size_t mem = n_runs * op_size(out);
        auto tensor_op_size = [](ggml_tensor * t) {
            size_t size = ggml_nbytes(t);
            // add source tensors
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                if (t->src[i] != NULL) {
                    size += ggml_nbytes(t->src[i]);
                }
            }
            return size;
        };
        for (int i = 0; i < gf->n_nodes; i++) {
            if (ggml_is_view_op(gf->nodes[i]->op) || gf->nodes[i] == out) {
                continue;
            }
            mem += tensor_op_size(gf->nodes[i]);
        }

        // run
        ggml_backend_synchronize(backend);

        int64_t start_time = ggml_time_us();
        ggml_backend_graph_compute(backend, gf);
        ggml_backend_synchronize(backend);
        int64_t end_time = ggml_time_us();
        double time_us = end_time - start_time;

        printf(" %5d runs - %8.2f us/run - %8zu kB/run - \033[1;34m%7.2f GB/s\033[0m\n",
            n_runs,
            time_us / n_runs,
            op_size(out) / 1024,
            mem / (time_us/1e6) / 1024.0 / 1024.0 / 1024.0);

        ggml_backend_buffer_free(buf);

        ggml_free(ctx);

        return true;
    }
};
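
// A test case is exercised by constructing it and calling eval()/eval_perf().
// Hypothetical call site (the actual registration list lives later in this file):
//   test_cases.emplace_back(new test_unary(GGML_UNARY_OP_GELU));
//   test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_0, GGML_TYPE_F32, 16, 16, 256, {1, 1}, {1, 1}));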

// GGML_OP_UNARY
struct test_unary : public test_case {
    const ggml_unary_op op;
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int v; // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, v);
    }

    test_unary(ggml_unary_op op,
            ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {128, 10, 10, 10},
            int v = 0)
        : op(op), type(type), ne_a(ne_a), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }
        ggml_tensor * out = ggml_unary(ctx, a, op);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            // test extended range of values to check for NaNs in GELU
            init_tensor_uniform(t, -150.f, 150.f);
        }
    }
};

// GGML_OP_GET_ROWS
struct test_get_rows : public test_case {
    const ggml_type type;
    const int n; // cols
    const int m; // rows
    const int r; // rows to get
    const int b; // batch size
    const bool v; // view (non-contiguous src1)

    std::string vars() override {
        return VARS_TO_STR6(type, n, m, r, b, v);
    }

    test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
        : type(type), n(n), m(m), r(r), b(b), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * in = ggml_new_tensor_3d(ctx, type, n, m, b);
        ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
        if (v) {
            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
        }
        ggml_tensor * out = ggml_get_rows(ctx, in, rows);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // rows
                std::vector<int> data(r*b);
                for (int i = 0; i < r*b; i++) {
                    data[i] = rand() % m;
                }
                ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_REPEAT
struct test_repeat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 2;
    }

    test_repeat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            std::array<int, 4> nr = {2, 2, 2, 2})
        : type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * target = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_repeat(ctx, src, target);
        return out;
    }
};

// GGML_OP_DUP
struct test_dup : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> permute;
    bool _use_permute;

    std::string vars() override {
        std::string v = VARS_TO_STR2(type, ne);
        if (_use_permute) v += "," + VAR_TO_STR(permute);
        return v;
    }

    test_dup(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1},
            std::array<int64_t, 4> permute = {0, 0, 0, 0})
        : type(type), ne(ne), permute(permute),
            _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        if (_use_permute) {
            src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
        }
        ggml_tensor * out = ggml_dup(ctx, src);
        return out;
    }
};

// GGML_OP_CPY
struct test_cpy : public test_case {
    const ggml_type type_src;
    const ggml_type type_dst;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR3(type_src, type_dst, ne);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
    }

    test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type_src(type_src), type_dst(type_dst), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
        ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, ne.data());
        ggml_tensor * out = ggml_cpy(ctx, src, dst);
        return out;
    }
};

// GGML_OP_CONT
struct test_cont : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cont(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        src = ggml_transpose(ctx, src);
        ggml_tensor * out = ggml_cont(ctx, src);
        return out;
    }
};

// GGML_OP_ADD
// GGML_OP_MUL
// GGML_OP_DIV
struct test_bin_bcast : public test_case {
    using op_t = ggml_tensor * (*) (ggml_context *, ggml_tensor *, ggml_tensor *);
    op_t op;
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 3;
    }

    test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 1, 1},
            std::array<int, 4> nr = {1, 2, 1, 1})
        : op(op), type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = op(ctx, a, b);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (op == ggml_div) {
                // avoid division by zero
                init_tensor_uniform(t, 1.0f, 2.0f);
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_SCALE
struct test_scale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float scale;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, scale);
    }

    test_scale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float scale = 2.0f)
        : type(type), ne(ne), scale(scale) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_scale(ctx, a, scale);
        return out;
    }
};

// GGML_OP_NORM
struct test_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_norm(ctx, a, eps);
        return out;
    }
};

// GGML_OP_RMS_NORM
struct test_rms_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_rms_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_rms_norm(ctx, a, eps);
        return out;
    }
};
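
// For the matrix multiplication tests below, ggml_mul_mat(ctx, a, b) treats
// a as (k, m) and b as (k, n) and produces an (m, n) result (see the comment
// in build_graph()); when b carries more batches than a (bs[i]*nr[i] vs bs[i]),
// a is broadcast across the extra batches, which is what nr exercises.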
// GGML_OP_MUL_MAT
struct test_mul_mat : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int64_t m;
    const int64_t n;
    const int64_t k;
    const std::array<int64_t, 2> bs; // dims 3 and 4
    const std::array<int64_t, 2> nr; // repeat in dims 3 and 4

    std::string vars() override {
        return VARS_TO_STR7(type_a, type_b, m, n, k, bs, nr);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[0]) * n * nr[0] * nr[1];
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int64_t m = 32, int64_t n = 32, int64_t k = 32,
            std::array<int64_t, 2> bs = {10, 10},
            std::array<int64_t, 2> nr = {2, 2})
        : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0],       bs[1]);
        ggml_tensor * b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]);
        ggml_tensor * out = ggml_mul_mat(ctx, a, b);
        return out;
    }
};

// GGML_OP_MUL_MAT_ID
struct test_mul_mat_id : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int n_mats;
    const int n_used;
    const bool b; // broadcast b matrix
    const int64_t m;
    const int64_t n;
    const int64_t k;

    std::string vars() override {
        return VARS_TO_STR8(type_a, type_b, n_mats, n_used, b, m, n, k);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[2]) * n;
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int n_mats = 8, int n_used = 2, bool b = false,
            int64_t m = 32, int64_t n = 32, int64_t k = 32)
        : type_a(type_a), type_b(type_b), n_mats(n_mats), n_used(n_used), b(b),
            m(m), n(n), k(k) {
        GGML_ASSERT(n_used <= n_mats);
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats);
        ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n);
        if (n_used != n_mats) {
            ids = ggml_view_2d(ctx, ids, n_used, n, ids->nb[1], 0);
        }
        ggml_tensor * b = ggml_new_tensor_3d(ctx, type_b, k, this->b ? 1 : n_used, n);
        ggml_tensor * out = ggml_mul_mat_id(ctx, as, b, ids);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // ids
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<int32_t> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i % n_mats;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(int32_t));
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_SQR
struct test_sqr : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqr(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sqr(ctx, a);
        return out;
    }
};

// GGML_OP_CLAMP
struct test_clamp : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float min;
    float max;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, min, max);
    }

    test_clamp(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float min = -0.5f, float max = 0.5f)
        : type(type), ne(ne), min(min), max(max) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_clamp(ctx, a, min, max);
        return out;
    }
};

// GGML_OP_DIAG_MASK_INF
struct test_diag_mask_inf : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int n_past;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, n_past);
    }

    test_diag_mask_inf(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            int n_past = 5)
        : type(type), ne(ne), n_past(n_past) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_diag_mask_inf(ctx, a, n_past);
        return out;
    }
};

// GGML_OP_SOFT_MAX
struct test_soft_max : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const bool mask;
    const float scale;
    const float max_bias;

    std::string vars() override {
        return VARS_TO_STR5(type, ne, mask, scale, max_bias);
    }

    // the 1024 test with bias occasionally fails:
    // SOFT_MAX(type=f32,ne=[1024,16,1,1],mask=1,scale=1.000000,max_bias=8.000000): [SOFT_MAX] NMSE = 0.000000103 > 0.000000100 FAIL
    virtual double max_nmse_err() override {
        return 1e-6;
    }

    test_soft_max(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            bool mask = false,
            float scale = 1.0f,
            float max_bias = 0.0f)
        : type(type), ne(ne), mask(mask), scale(scale), max_bias(max_bias) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * mask = nullptr;
        if (this->mask) {
            mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]);
        }
        ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
        return out;
    }
};

// GGML_OP_ROPE
struct test_rope : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int n_dims;
    int mode;
    int n_ctx; // used to generate positions
    float fs; // freq_scale
    float ef; // ext_factor
    float af; // attn_factor
    bool ff;
    int v; // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR10(type, ne_a, n_dims, mode, n_ctx, fs, ef, af, ff, v);
    }

    test_rope(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 1},
            int n_dims = 10, int mode = 0, int n_ctx = 512, float fs = 1.0f, float ef = 0.0f, float af = 0.0f, bool ff = false, int v = 0)
        : type(type), ne_a(ne_a), n_dims(n_dims), mode(mode), n_ctx(n_ctx), fs(fs), ef(ef), af(af), ff(ff), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }
        ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2]);
        ggml_tensor * freq = ff ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_dims/2) : nullptr;
        ggml_tensor * out = ggml_rope_ext(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(ne_a[2]);
                for (int i = 0; i < ne_a[2]; i++) {
                    data[i] = rand() % n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, ne_a[2] * sizeof(int));
            } else {
                if (t->ne[0] == n_dims/2) {
                    // frequency factors in the range [0.9f, 1.1f]
                    init_tensor_uniform(t, 0.9f, 1.1f);
                } else {
                    init_tensor_uniform(t);
                }
            }
        }
    }
};

// GGML_OP_POOL2D
struct test_pool2d : public test_case {
    enum ggml_op_pool pool_type;
    const ggml_type type_input;
    const std::array<int64_t, 4> ne_input;
    // kernel size
    const int k0;
    const int k1;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;

    std::string vars() override {
        return VARS_TO_STR9(pool_type, type_input, ne_input, k0, k1, s0, s1, p0, p1);
    }

    test_pool2d(ggml_op_pool pool_type = GGML_OP_POOL_AVG,
            ggml_type type_input = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
            int k0 = 3, int k1 = 3,
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1)
        : pool_type(pool_type), type_input(type_input), ne_input(ne_input), k0(k0), k1(k1), s0(s0), s1(s1), p0(p0), p1(p1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_tensor * out = ggml_pool_2d(ctx, input, pool_type, k0, k1, s0, s1, p0, p1);
        return out;
    }
};

// GGML_OP_IM2COL
struct test_im2col : public test_case {
    const ggml_type type_input;
    const ggml_type type_kernel;
    const ggml_type dst_type;
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;
    // dilation
  1059. const int d0;
  1060. const int d1;
  1061. // mode
  1062. const bool is_2D;
  1063. std::string vars() override {
  1064. return VARS_TO_STR12(type_input, type_kernel, dst_type, ne_input, ne_kernel, s0, s1, p0, p1, d0, d1, is_2D);
  1065. }
  1066. test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16, ggml_type dst_type = GGML_TYPE_F32,
  1067. std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
  1068. std::array<int64_t, 4> ne_kernel = {3, 3, 3, 1}, // [kernel_width, kernel_height, input_channels, 1]
  1069. int s0 = 1, int s1 = 1,
  1070. int p0 = 1, int p1 = 1,
  1071. int d0 = 1, int d1 = 1,
  1072. bool is_2D = true)
  1073. : type_input(type_input), type_kernel(type_kernel), dst_type(dst_type), ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), is_2D(is_2D) {}
  1074. ggml_tensor * build_graph(ggml_context * ctx) override {
  1075. ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
  1076. ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data());
  1077. ggml_tensor * out = ggml_im2col(ctx, kernel, input, s0, s1, p0, p1, d0, d1, is_2D, dst_type);
  1078. return out;
  1079. }
  1080. };
  1081. // GGML_OP_CONCAT
  1082. struct test_concat : public test_case {
  1083. const ggml_type type;
  1084. const std::array<int64_t, 4> ne_a;
  1085. const int64_t ne_b_d;
  1086. const int dim;
  1087. const int v; // view (1 << 0: non-cont a, 1 << 1: non-cont b)
  1088. std::string vars() override {
  1089. return VARS_TO_STR5(type, ne_a, ne_b_d, dim, v);
  1090. }
  1091. test_concat(ggml_type type = GGML_TYPE_F32,
  1092. std::array<int64_t, 4> ne_a = {10, 10, 10, 10},
  1093. int64_t ne_b_d = 10,
  1094. int dim = 2, int v = 0)
  1095. : type(type), ne_a(ne_a), ne_b_d(ne_b_d), dim(dim), v(v) {}
  1096. ggml_tensor * build_graph(ggml_context * ctx) override {
  1097. auto ne_b = ne_a;
  1098. ne_b[dim] = ne_b_d;
  1099. ggml_tensor * a;
  1100. if (v & 1) {
  1101. auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
  1102. a = ggml_new_tensor(ctx, type, 4, ne.data());
  1103. a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
  1104. } else {
  1105. a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  1106. }
  1107. ggml_tensor * b;
  1108. if (v & 2) {
  1109. auto ne = ne_b; ne[0] *= 3; ne[1] *= 2; ne[2] *= 4;
  1110. b = ggml_new_tensor(ctx, type, 4, ne.data());
  1111. b = ggml_view_4d(ctx, b, ne_b[0], ne_b[1], ne_b[2], ne_b[3], b->nb[1], b->nb[2], b->nb[3], 0);
  1112. } else {
  1113. b = ggml_new_tensor(ctx, type, 4, ne_b.data());
  1114. }
  1115. ggml_tensor * out = ggml_concat(ctx, a, b, dim);
  1116. return out;
  1117. }
  1118. };
  1119. // GGML_OP_ARGSORT
  1120. struct test_argsort : public test_case {
  1121. const ggml_type type;
  1122. const std::array<int64_t, 4> ne;
  1123. ggml_sort_order order;
  1124. std::string vars() override {
  1125. return VARS_TO_STR3(type, ne, order);
  1126. }
  1127. test_argsort(ggml_type type = GGML_TYPE_F32,
  1128. std::array<int64_t, 4> ne = {16, 10, 10, 10},
  1129. ggml_sort_order order = GGML_SORT_ORDER_ASC)
  1130. : type(type), ne(ne), order(order) {}
  1131. ggml_tensor * build_graph(ggml_context * ctx) override {
  1132. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1133. ggml_tensor * out = ggml_argsort(ctx, a, order);
  1134. return out;
  1135. }
  1136. void initialize_tensors(ggml_context * ctx) override {
  1137. std::random_device rd;
  1138. std::default_random_engine rng(rd());
  1139. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1140. if (t->type == GGML_TYPE_I32) {
  1141. // indices
  1142. std::vector<int> data(ggml_nelements(t));
  1143. for (int i = 0; i < ggml_nelements(t); i++) {
  1144. data[i] = rand();
  1145. }
  1146. std::shuffle(data.begin(), data.end(), rng);
  1147. ggml_backend_tensor_set(t, data.data(), 0, ne[0]*ne[1]*ne[2]*ne[3] * sizeof(int));
  1148. } else if (t->type == GGML_TYPE_F32) {
  1149. // initialize with unique values to avoid ties
  1150. for (int64_t r = 0; r < ggml_nrows(t); r++) {
  1151. std::vector<float> data(t->ne[0]);
  1152. for (int i = 0; i < t->ne[0]; i++) {
  1153. data[i] = i;
  1154. }
  1155. std::shuffle(data.begin(), data.end(), rng);
  1156. ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
  1157. }
  1158. } else {
  1159. GGML_ASSERT(false);
  1160. }
  1161. }
  1162. }
  1163. };
  1164. // GGML_OP_SUM_ROWS
  1165. struct test_sum_rows : public test_case {
  1166. const ggml_type type;
  1167. const std::array<int64_t, 4> ne;
  1168. std::string vars() override {
  1169. return VARS_TO_STR2(type, ne);
  1170. }
  1171. test_sum_rows(ggml_type type = GGML_TYPE_F32,
  1172. std::array<int64_t, 4> ne = {10, 10, 10, 10})
  1173. : type(type), ne(ne) {}
  1174. ggml_tensor * build_graph(ggml_context * ctx) override {
  1175. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1176. ggml_tensor * out = ggml_sum_rows(ctx, a);
  1177. return out;
  1178. }
  1179. };
  1180. // GGML_OP_UPSCALE
  1181. struct test_upscale : public test_case {
  1182. const ggml_type type;
  1183. const std::array<int64_t, 4> ne;
  1184. const int32_t scale_factor;
  1185. const bool transpose;
  1186. std::string vars() override {
  1187. return VARS_TO_STR4(type, ne, scale_factor, transpose);
  1188. }
  1189. test_upscale(ggml_type type = GGML_TYPE_F32,
  1190. std::array<int64_t, 4> ne = {512, 512, 3, 1},
  1191. int32_t scale_factor = 2, bool transpose = false)
  1192. : type(type), ne(ne), scale_factor(scale_factor), transpose(transpose) {}
  1193. ggml_tensor * build_graph(ggml_context * ctx) override {
  1194. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1195. if (transpose) a = ggml_transpose(ctx, a);
  1196. ggml_tensor * out = ggml_upscale(ctx, a, scale_factor);
  1197. return out;
  1198. }
  1199. };
  1200. // GGML_OP_UPSCALE (ext)
  1201. struct test_upscale_ext : public test_case {
  1202. const ggml_type type;
  1203. const std::array<int64_t, 4> ne;
  1204. const std::array<int64_t, 4> ne_tgt;
  1205. std::string vars() override {
  1206. return VARS_TO_STR3(type, ne, ne_tgt);
  1207. }
  1208. test_upscale_ext(ggml_type type = GGML_TYPE_F32,
  1209. std::array<int64_t, 4> ne = {2, 5, 7, 11},
  1210. std::array<int64_t, 4> ne_tgt = {5, 7, 11, 13})
  1211. : type(type), ne(ne), ne_tgt(ne_tgt) {}
  1212. ggml_tensor * build_graph(ggml_context * ctx) override {
  1213. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1214. ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1],ne_tgt[2], ne_tgt[3]);
  1215. return out;
  1216. }
  1217. };
// GGML_OP_GROUP_NORM
struct test_group_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t num_groups;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, num_groups);
    }

    test_group_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 64, 320, 1},
            int32_t num_groups = 32)
        : type(type), ne(ne), num_groups(num_groups) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_group_norm(ctx, a, num_groups);
        return out;
    }
};
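// the channel dim (ne[2] = 320) is split into num_groups = 32 groups of 10 channels
// each; mean and variance are computed per group over its 64*64*10 elements.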
// GGML_OP_ACC
struct test_acc : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const std::array<int64_t, 4> ne_b;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, ne_b);
    }

    test_acc(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {1024, 577, 1, 1},
            std::array<int64_t, 4> ne_b = {1024, 576, 1, 1})
        : type(type), ne_a(ne_a), ne_b(ne_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        ggml_tensor * out = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], b->nb[1]);
        return out;
    }
};
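// ggml_acc adds b into a view of a with the given strides; the byte offset b->nb[1]
// equals exactly one row here (both tensors have 1024-float rows), so b lands on
// rows 1..576 of a and row 0 is left untouched.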
// GGML_OP_PAD
struct test_pad : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int pad_0;
    const int pad_1;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
    }

    test_pad(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {512, 512, 1, 1},
            int pad_0 = 1, int pad_1 = 1)
        : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_pad(ctx, a, pad_0, pad_1, 0, 0);
        return out;
    }
};
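// ggml_pad appends zeros at the end of each dim: the default {512, 512, 1, 1} input
// padded by (1, 1, 0, 0) yields a {513, 513, 1, 1} output.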
// GGML_OP_ARANGE
struct test_arange : public test_case {
    const ggml_type type;
    const float start;
    const float stop;
    const float step;

    std::string vars() override {
        return VARS_TO_STR4(type, start, stop, step);
    }

    test_arange(ggml_type type = GGML_TYPE_F32,
            float start = 0.f, float stop = 10.f, float step = 1.f)
        : type(type), start(start), stop(stop), step(step) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * out = ggml_arange(ctx, start, stop, step);
        return out;
    }
};
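// ggml_arange takes no input tensor and always yields F32 output (the type member
// only labels the test); the defaults produce the half-open range [0, 10) in steps
// of 1, i.e. the 10 values 0..9.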
// GGML_OP_TIMESTEP_EMBEDDING
struct test_timestep_embedding : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int dim;
    const int max_period;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, dim, max_period);
    }

    test_timestep_embedding(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {2, 1, 1, 1},
            int dim = 320, int max_period = 10000)
        : type(type), ne_a(ne_a), dim(dim), max_period(max_period) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_timestep_embedding(ctx, a, dim, max_period);
        return out;
    }
};
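// sinusoidal timestep embedding as used by diffusion models: each timestep in `a` is
// expanded into a dim-sized vector of cos/sin features, with frequencies spaced
// geometrically down to 1/max_period.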
// GGML_OP_LEAKY_RELU
struct test_leaky_relu : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const float negative_slope;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, negative_slope);
    }

    test_leaky_relu(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 10},
            float negative_slope = 0.1f)
        : type(type), ne_a(ne_a), negative_slope(negative_slope) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_leaky_relu(ctx, a, negative_slope, true);
        return out;
    }
};
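// leaky_relu(x) = x for x > 0, negative_slope * x otherwise; the trailing `true`
// selects the in-place variant of the op.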
// GGML_OP_FLASH_ATTN_EXT
struct test_flash_attn_ext : public test_case {
    const int64_t hs; // head size
    const int64_t nh; // num heads
    const int64_t kv; // kv size
    const int64_t nb; // batch size

    const bool mask; // use mask

    const float max_bias; // ALiBi

    const ggml_type type_KV;

    std::string vars() override {
        return VARS_TO_STR7(hs, nh, kv, nb, mask, max_bias, type_KV);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8,
                        bool mask = true, float max_bias = 0.0f, ggml_type type_KV = GGML_TYPE_F16)
        : hs(hs), nh(nh), kv(kv), nb(nb), mask(mask), max_bias(max_bias), type_KV(type_KV) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        const int64_t hs_padded = GGML_PAD(hs, ggml_blck_size(type_KV));

        ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs_padded, nb, nh, 1);
        ggml_tensor * k = ggml_new_tensor_4d(ctx, type_KV,       hs_padded, kv, nh, 1);
        ggml_tensor * v = ggml_new_tensor_4d(ctx, type_KV,       hs_padded, kv, nh, 1);
        ggml_tensor * m = mask ? ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1) : nullptr;
        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias);
        return out;
    }
};
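// the op computes softmax(Q*K^T*scale + mask)*V in a single fused kernel, with the
// usual attention scale 1/sqrt(hs); the head size is padded to the block size of
// type_KV so quantized K/V rows stay block-aligned, and the mask row count is padded
// to GGML_KQ_MASK_PAD.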
enum llm_norm_type {
    LLM_NORM,
    LLM_NORM_RMS,
};

struct llama_hparams {
    uint32_t n_vocab;
    uint32_t n_embd;
    uint32_t n_head;
    uint32_t n_head_kv;
    static constexpr uint32_t n_layer = 1;
    uint32_t n_rot;
    uint32_t n_embd_head; // dimension of values (d_v)
    uint32_t n_ff;
    float f_norm_eps;
    float f_norm_rms_eps;

    // cparams
    static constexpr uint32_t n_ctx = 512; // user-specified context size
    static constexpr uint32_t n_ctx_orig = n_ctx;

    // batch
    int32_t n_tokens;

    // llm_build_context
    static constexpr int32_t n_kv = 32;   // size of KV cache to consider (n_kv <= n_ctx)
    static constexpr int32_t kv_head = 1; // index of where we store new KV data in the cache

    uint32_t n_embd_gqa() const { // dimension of key embeddings across all k-v heads
        return n_embd_head * n_head_kv;
    }
};
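// worked example: the Llama config below uses n_embd_head = 100 and n_head_kv = 32,
// so n_embd_gqa() = 3200 == n_embd (no grouped-query sharing); the Falcon config uses
// n_head_kv = 1, giving n_embd_gqa() = 64 (multi-query attention).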
// LLM base class
struct test_llm : public test_case {
    llama_hparams hp;

protected:
    test_llm(llama_hparams hp)
        : hp(std::move(hp)) {
    }

public:
    struct ggml_tensor * llm_build_norm(
            struct ggml_context * ctx,
            struct ggml_tensor * cur,
            struct ggml_tensor * mw,
            struct ggml_tensor * mb,
            llm_norm_type type) {
        switch (type) {
            case LLM_NORM:     cur = ggml_norm    (ctx, cur, hp.f_norm_eps);     break;
            case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hp.f_norm_rms_eps); break;
        }
        cur = ggml_mul(ctx, cur, mw);
        if (mb) {
            cur = ggml_add(ctx, cur, mb);
        }
        return cur;
    }

    void llm_build_kv_store(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * k_cur,
            struct ggml_tensor * v_cur) {
        // compute the transposed [n_tokens, n_embd] V matrix
        struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, hp.n_embd_gqa(), hp.n_tokens));

        struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, hp.n_tokens*hp.n_embd_gqa(),
                (ggml_row_size(k_l->type, hp.n_embd_gqa()))*hp.kv_head);

        struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, v_l, hp.n_tokens, hp.n_embd_gqa(),
                (hp.n_ctx)*ggml_element_size(v_l),
                (hp.kv_head)*ggml_element_size(v_l));

        // important: storing RoPE-ed version of K in the KV cache!
        ggml_cpy(ctx, k_cur, k_cache_view);
        ggml_cpy(ctx, v_cur_t, v_cache_view);
    }

    struct ggml_tensor * llm_build_kqv(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * q_cur,
            struct ggml_tensor * kq_mask,
            float kq_scale) {
        struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);

        struct ggml_tensor * k =
            ggml_view_3d(ctx, k_l,
                    hp.n_embd_head, hp.n_kv, hp.n_head_kv,
                    ggml_row_size(k_l->type, hp.n_embd_gqa()),
                    ggml_row_size(k_l->type, hp.n_embd_head),
                    0);

        struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, 0.0f);

        // split cached v into n_head heads
        struct ggml_tensor * v =
            ggml_view_3d(ctx, v_l,
                    hp.n_kv, hp.n_embd_head, hp.n_head_kv,
                    ggml_element_size(v_l)*hp.n_ctx,
                    ggml_element_size(v_l)*hp.n_ctx*hp.n_embd_head,
                    0);

        struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
        struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
        struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, hp.n_embd_head*hp.n_head, hp.n_tokens);

        struct ggml_tensor * wo = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
        cur = ggml_mul_mat(ctx, wo, cur);
        return cur;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(hp.n_tokens);
                for (int i = 0; i < hp.n_tokens; i++) {
                    data[i] = rand() % hp.n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, hp.n_tokens * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
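// note on the cache layout above: K is stored row-contiguous (one n_embd_gqa row per
// position), while V is stored transposed, each embedding channel occupying a
// contiguous strip of n_ctx slots; this is why llm_build_kqv can view V with
// per-channel strides without an extra permute at attention time.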
// Llama
struct test_llama : public test_llm {
    static constexpr float freq_base = 10000.0f;
    static constexpr float freq_scale = 1.0f;
    static constexpr float ext_factor = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast = 32.0f;
    static constexpr float beta_slow = 1.0f;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "LLAMA";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    test_llama(int n_tokens = 1)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 32,
            /*n_head_kv      =*/ 32,
            /*n_rot          =*/ 100,
            /*n_embd_head    =*/ 100,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 0.f,
            /*f_norm_rms_eps =*/ 1e-5f,
            /*n_tokens       =*/ n_tokens,
        }) {
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);

        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            ggml_tensor * attn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, inpL, attn_norm, nullptr, LLM_NORM_RMS);

            // self-attention
            {
                ggml_tensor * wq = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
                ggml_tensor * wk = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());
                ggml_tensor * wv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());

                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx, wq, cur);
                struct ggml_tensor * Kcur = ggml_mul_mat(ctx, wk, cur);
                struct ggml_tensor * Vcur = ggml_mul_mat(ctx, wv, cur);

                Qcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx, cur, inpSA);

            // feed-forward network
            ggml_tensor * ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, ffn_inp, ffn_norm, nullptr, LLM_NORM_RMS);

            ggml_tensor * ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
            ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
            ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);

            struct ggml_tensor * tmp = ggml_mul_mat(ctx, ffn_up, cur);
            cur = ggml_mul_mat(ctx, ffn_gate, cur);
            cur = ggml_silu(ctx, cur);
            cur = ggml_mul(ctx, cur, tmp);
            cur = ggml_mul_mat(ctx, ffn_down, cur);
            cur = ggml_add(ctx, cur, ffn_inp);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, nullptr, LLM_NORM_RMS);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);
        return cur;
    }
};
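// the FFN above is the SwiGLU block used by Llama: down(silu(gate(x)) * up(x));
// `tmp` holds the up-projection while `cur` carries the gated activation.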
// Falcon
struct test_falcon : public test_llm {
    static constexpr float freq_base = 10000.0f;
    static constexpr float freq_scale = 1.0f;
    static constexpr float ext_factor = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast = 32.0f;
    static constexpr float beta_slow = 1.0f;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "FALCON";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    test_falcon(int n_tokens = 1)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 50,
            /*n_head_kv      =*/ 1,
            /*n_rot          =*/ 64,
            /*n_embd_head    =*/ 64,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 1e-5f,
            /*f_norm_rms_eps =*/ 0.f,
            /*n_tokens       =*/ n_tokens,
        }) {
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);

        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            // norm
            ggml_tensor * attn_norm_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm = llm_build_norm(ctx, inpL, attn_norm_w, attn_norm_b, LLM_NORM);

            // self-attention
            {
                cur = attn_norm;

                ggml_tensor * wqkv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd + 2*hp.n_embd_gqa());

                cur = ggml_mul_mat(ctx, wqkv, cur);

                struct ggml_tensor * Qcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd,       hp.n_tokens, cur->nb[1], 0*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd + hp.n_embd_gqa())));

                Qcur = ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head,    hp.n_tokens);
                Kcur = ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens);

                // using mode = 2 for neox mode
                Qcur = ggml_rope_ext(
                    ctx, Qcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, Kcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }

            struct ggml_tensor * ffn_inp = cur;

            // feed forward
            {
                ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
                ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
                cur = attn_norm;
                cur = ggml_mul_mat(ctx, ffn_up, cur);
                cur = ggml_gelu(ctx, cur);
                cur = ggml_mul_mat(ctx, ffn_down, cur);
            }

            cur = ggml_add(ctx, cur, ffn_inp);
            cur = ggml_add(ctx, cur, inpL);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        ggml_tensor * output_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, output_norm_b, LLM_NORM);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);
        return cur;
    }
};
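// unlike test_llama, this block is "parallel": attention and FFN both read the same
// attn_norm output, and their results are summed together with the residual inpL.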
static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
    std::vector<std::unique_ptr<test_case>> test_cases;
    std::default_random_engine rng(0);

    const ggml_type all_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_BF16,
        GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
        GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0,
        GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
        GGML_TYPE_Q4_K, GGML_TYPE_Q5_K,
        GGML_TYPE_Q6_K,
        GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
        GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
        GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
    };

    const ggml_type base_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16,
        GGML_TYPE_Q4_0,
        GGML_TYPE_Q4_K,
        GGML_TYPE_IQ2_XXS
    };

    const ggml_type other_types[] = {
        GGML_TYPE_Q4_1,
        GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0,
        GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
        GGML_TYPE_Q5_K,
        GGML_TYPE_Q6_K,
        GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
        GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
        GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
    };
    // unary ops
    for (int v : {0, 1}) {
        for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) {
            test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, { 128, 10, 10, 10 }, v));
            test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, { 7, 13, 19, 23 }, v));
        }
    }

    test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false));
    for (ggml_type type : all_types) {
        for (int b : {1, 7}) {
            for (bool v : {false, true}) {
                test_cases.emplace_back(new test_get_rows(type, 256, 5, 4, b, v));
            }
        }
    }
    for (int b : {1, 7}) {
        for (bool v : {false, true}) {
            test_cases.emplace_back(new test_get_rows(GGML_TYPE_I32, 256, 5, 4, b, v));
        }
    }
    for (ggml_type type_input : {GGML_TYPE_F32}) {
        for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) {
            for (int k0 : {1, 3}) {
                for (int k1 : {1, 3}) {
                    for (int s0 : {1, 2}) {
                        for (int s1 : {1, 2}) {
                            for (int p0 : {0, 1}) {
                                for (int p1 : {0, 1}) {
                                    test_cases.emplace_back(new test_pool2d(pool_type, type_input, {10, 10, 3, 1}, k0, k1, s0, s1, p0, p1));
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32));
    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16));

    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 2, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 2, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 2}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, {10, 10, 10, 10}, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 10, 10, 10}, {1, 1, 1, 2}));

    test_cases.emplace_back(new test_dup(GGML_TYPE_F32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3}));

    for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
        for (ggml_type type_dst : all_types) {
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
        }
    }

    test_cases.emplace_back(new test_cont());

    auto add_test_bin_bcast = [&](ggml_type type, std::array<int64_t, 4> ne, std::array<int, 4> nr) {
        for (auto op : {ggml_add, ggml_mul, ggml_div}) {
            test_cases.emplace_back(new test_bin_bcast(op, type, ne, nr));
        }
    };
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 8, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1, 1}, {32, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 320, 320}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 2, 2, 2});

    // stable diffusion
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 16, 16, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 16, 16, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 16, 1280, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 2560, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 640, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {5120, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {640, 1, 1, 1}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {2, 1, 1, 1});
    test_cases.emplace_back(new test_scale());

    for (float eps : {1e-6f, 1e-5f, 1e-3f, 1e-1f}) {
        test_cases.emplace_back(new test_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
        test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
    }
    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, { 1,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10,  1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {2, 2}));

            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10,  1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 2}));
        }
    }

    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32}) {
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1}, {1, 1}));
        }
    }

    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2, 128, { 8, 1}, {1, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2, 128, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2,  64, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2,  64, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64, 45, 128, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45,  64, { 8, 1}, {4, 1}));

    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4, 8}) {
                for (int n_used : {1, 2, 4}) {
                    for (bool b : {false, true}) {
                        for (int n : {1, 32}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }

    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4}) {
                for (int n_used : {2}) {
                    for (bool b : {false}) {
                        for (int n : {1}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_sqr());
    test_cases.emplace_back(new test_clamp());

    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10,  1,  1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10,  1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10, 10}, 5));

#if 0
    std::uniform_int_distribution<> dist_ne1(1, 50);
    int exponent = 1;
    while (exponent < (1 << 17)) {
        std::uniform_int_distribution<> dist_ne0(exponent, 2*exponent);

        for (int n = 0; n < 10; ++n) {
            int64_t ne0 = dist_ne0(rng);
            int64_t ne1 = dist_ne1(rng);
            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, n/2 == 0, 0.1f, ne0 < 1000 ? 4.0f : 0.0f));
        }

        exponent <<= 1;
    }
#endif
    for (bool mask : {false, true}) {
        for (float max_bias : {0.0f, 8.0f}) {
            if (!mask && max_bias > 0.0f) continue;
            for (float scale : {1.0f, 0.1f}) {
                for (int64_t ne0 : {16, 1024}) {
                    for (int64_t ne1 : {16, 1024}) {
                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0,   ne1,   1, 1}, mask, scale, max_bias));
                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, scale, max_bias));
                    }
                }
            }
        }
    }

    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  0.1f, 8.0f));
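    // note: in ggml_soft_max_ext, `scale` multiplies the logits before the softmax
    // and a non-zero `max_bias` enables ALiBi, applying a per-head slope (derived
    // from max_bias) to the mask values.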
    {
        bool all = true;

        for (float v : { 0, 1 }) {
            for (float fs : { 1.0f, 1.4245f }) {
                for (float ef : { 0.0f, 0.7465f }) {
                    for (float af : { 1.0f, 1.4245f }) {
                        for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
                            for (bool ff : {false, true}) { // freq_factors
                                test_cases.emplace_back(new test_rope(type, {128,  32, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 7B

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, {128,  40, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 13B
                                    test_cases.emplace_back(new test_rope(type, {128,  52, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 30B
                                    test_cases.emplace_back(new test_rope(type, {128,  64, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 65B
                                }

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, { 64,   1, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64,  71, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64,   8, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 40B)
                                    test_cases.emplace_back(new test_rope(type, { 80,  32, 10, 1},  20, 2, 512, fs, ef, af, ff, v)); // neox (stablelm)
                                    test_cases.emplace_back(new test_rope(type, { 80,  32, 10, 1},  32, 2, 512, fs, ef, af, ff, v)); // neox (phi-2)
                                }

                                test_cases.emplace_back(new test_rope(type, { 64, 128, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 40B)
                            }
                        }

                        all = false;
                    }
                }
            }
        }
    }
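    // note: `all` is cleared after the first (v, fs, ef, af) combination, so the full
    // set of model shapes is exercised only once; later combinations run a reduced
    // shape set to keep the total test count down.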
    for (int v : { 0, 1, 2, 3 }) {
        for (int dim : { 0, 1, 2, 3, }) {
            test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v));
            test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v));
        }
    }

    for (ggml_sort_order order : {GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC}) {
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {8, 1, 1, 1}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {60, 10, 10, 10}, order)); // qwen
    }
    test_cases.emplace_back(new test_sum_rows());
    test_cases.emplace_back(new test_upscale());
    test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 1 }, 2, true));
    test_cases.emplace_back(new test_upscale_ext());
    test_cases.emplace_back(new test_group_norm());
    test_cases.emplace_back(new test_acc());
    test_cases.emplace_back(new test_pad());
    test_cases.emplace_back(new test_arange());
    test_cases.emplace_back(new test_timestep_embedding());
    test_cases.emplace_back(new test_leaky_relu());

    for (int hs : { 64, 80, 128, 256, }) {
        for (bool mask : { true, false } ) {
            for (float max_bias : { 0.0f, 8.0f }) {
                if (!mask && max_bias > 0.0f) continue;
                for (int nh : { 32, }) {
                    for (int kv : { 512, 1024, }) {
                        for (int nb : { 1, 2, 4, 8, }) {
                            for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) {
                                test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, mask, max_bias, type_KV));
                            }
                        }
                    }
                }
            }
        }
    }
    // these tests are disabled to save execution time, but they can be handy for debugging
#if 0
    test_cases.emplace_back(new test_llama(1));
    test_cases.emplace_back(new test_llama(2));
    test_cases.emplace_back(new test_falcon(1));
    test_cases.emplace_back(new test_falcon(2));
#endif

    // run tests
    if (mode == MODE_TEST) {
        ggml_backend_t backend_cpu = ggml_backend_cpu_init();

        size_t n_ok = 0;
        for (auto & test : test_cases) {
            if (test->eval(backend, backend_cpu, op_name)) {
                n_ok++;
            }
        }
        printf("  %zu/%zu tests passed\n", n_ok, test_cases.size());

        ggml_backend_free(backend_cpu);

        return n_ok == test_cases.size();
    }

    if (mode == MODE_PERF) {
        for (auto & test : test_cases) {
            test->eval_perf(backend, op_name);
        }
        return true;
    }

    GGML_ASSERT(false);
    return false;
}
static void usage(char ** argv) {
    printf("Usage: %s [mode] [-o op] [-b backend]\n", argv[0]);
    printf("    valid modes are: test (compare with CPU backend for correctness) or perf (performance evaluation)\n");
    printf("    op names are as given by ggml_op_desc()\n");
}
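// example invocations (backend names depend on the build; "CUDA0" is hypothetical):
//   test-backend-ops test -o MUL_MAT    // check MUL_MAT against the CPU backend
//   test-backend-ops perf -b CUDA0      // benchmark every op on one backend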
int main(int argc, char ** argv) {
    test_mode mode = MODE_TEST;
    const char * op_name_filter = NULL;
    const char * backend_filter = NULL;

    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "test") == 0) {
            mode = MODE_TEST;
        } else if (strcmp(argv[i], "perf") == 0) {
            mode = MODE_PERF;
        } else if (strcmp(argv[i], "-o") == 0) {
            if (i + 1 < argc) {
                op_name_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else if (strcmp(argv[i], "-b") == 0) {
            if (i + 1 < argc) {
                backend_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else {
            usage(argv);
            return 1;
        }
    }

    // enumerate backends
    printf("Testing %zu backends\n\n", ggml_backend_reg_get_count());

    size_t n_ok = 0;

    for (size_t i = 0; i < ggml_backend_reg_get_count(); i++) {
        printf("Backend %zu/%zu (%s)\n", i + 1, ggml_backend_reg_get_count(), ggml_backend_reg_get_name(i));

        if (backend_filter != NULL && strcmp(backend_filter, ggml_backend_reg_get_name(i)) != 0) {
            printf("  Skipping\n");
            n_ok++;
            continue;
        }

        ggml_backend_t backend = ggml_backend_reg_init_backend(i, NULL);
        GGML_ASSERT(backend != NULL);

        if (backend_filter == NULL && ggml_backend_is_cpu(backend)) {
            printf("  Skipping CPU backend\n");
            ggml_backend_free(backend);
            n_ok++;
            continue;
        }

        printf("  Backend name: %s\n", ggml_backend_name(backend));

        bool ok = test_backend(backend, mode, op_name_filter);

        printf("  Backend %s: ", ggml_backend_name(backend));
        if (ok) {
            printf("\033[1;32mOK\033[0m\n");
            n_ok++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }

        printf("\n");

        ggml_backend_free(backend);
    }

    printf("%zu/%zu backends passed\n", n_ok, ggml_backend_reg_get_count());

    if (n_ok != ggml_backend_reg_get_count()) {
        printf("\033[1;31mFAIL\033[0m\n");
        return 1;
    }

    ggml_quantize_free();

    printf("\033[1;32mOK\033[0m\n");

    return 0;
}