// test-backend-ops.cpp

#include <ggml.h>
#include <ggml-alloc.h>
#include <ggml-backend.h>

#include <algorithm>
#include <array>
#include <cfloat>
#include <cstring>
#include <functional>
#include <memory>
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thread>
#include <vector>

static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
    // static RNG initialization (revisit if n_threads stops being constant)
    static const size_t n_threads = std::thread::hardware_concurrency();
    static std::vector<std::default_random_engine> generators = []() {
        std::random_device rd;
        std::vector<std::default_random_engine> vec;
        vec.reserve(n_threads);
        //for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
        for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
        return vec;
    }();

    size_t size = ggml_nelements(tensor);
    std::vector<float> data(size);

    auto init_thread = [&](size_t ith, size_t start, size_t end) {
        std::uniform_real_distribution<float> distribution(min, max);
        for (size_t i = start; i < end; i++) {
            data[i] = distribution(generators[ith]);
        }
    };

    std::vector<std::thread> threads;
    threads.reserve(n_threads);
    for (size_t i = 0; i < n_threads; i++) {
        size_t start = i*size/n_threads;
        size_t end   = (i+1)*size/n_threads;
        threads.emplace_back(init_thread, i, start, end);
    }
    for (auto & t : threads) {
        t.join();
    }

#if 0
    const char * val_str = getenv("GGML_TEST_EPS");
    float val = 1e-9f;
    if (val_str != nullptr) {
        val = std::stof(val_str);
        printf("GGML_TEST_EPS=%e\n", val);
    }

    // test quantization with very small values that may result in nan scales due to division by zero
    if (ggml_is_quantized(tensor->type)) {
        for (int i = 0; i < 256; i++) {
            data[i] = val;
        }
    }
#endif

    if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
        ggml_backend_tensor_set(tensor, data.data(), 0, size * sizeof(float));
    } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
        GGML_ASSERT(size % ggml_blck_size(tensor->type) == 0);
        std::vector<uint8_t> dataq(ggml_row_size(tensor->type, size));
        std::vector<float> imatrix(tensor->ne[0], 1.0f); // dummy importance matrix
        const float * im = imatrix.data();
        if (!ggml_quantize_requires_imatrix(tensor->type)) {
            // when the imatrix is optional, we want to test both quantization with and without imatrix
            // use one of the random numbers to decide
            if (data[0] > 0.5f*(min + max)) {
                im = nullptr;
            }
        }
        ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size/tensor->ne[0], tensor->ne[0], im);
        GGML_ASSERT(ggml_validate_row_data(tensor->type, dataq.data(), dataq.size()));
        // TODO: other cases
        //#pragma omp parallel for
        //for (int i = 0; i < tensor->ne[1]; i++) {
        //    ggml_quantize_chunk(tensor->type, data.data(), dataq.data(),
        //        i * tensor->ne[0], 1, tensor->ne[0], im);
        //}
        ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
    } else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
        // This is going to create some weird integers though.
        ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
    } else {
        GGML_ABORT("fatal error");
    }
}
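
// A quick sanity check of the thread partitioning above (a minimal sketch,
// illustrative only, kept out of the build like the GGML_TEST_EPS block):
// with size = 10 and n_threads = 4, the ranges [i*size/n_threads,
// (i+1)*size/n_threads) come out as [0,2) [2,5) [5,7) [7,10) -- contiguous,
// non-overlapping, and covering all elements even when n_threads does not
// divide size evenly.
#if 0
static void demo_partition() {
    const size_t size = 10, n_threads = 4;
    for (size_t i = 0; i < n_threads; i++) {
        printf("thread %zu: [%zu, %zu)\n", i, i*size/n_threads, (i+1)*size/n_threads);
    }
}
#endif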

static std::vector<float> tensor_to_float(const ggml_tensor * t) {
    std::vector<float> tv;
    tv.reserve(ggml_nelements(t));

    std::vector<uint8_t> buf(ggml_nbytes(t));
    ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));

    ggml_type_traits_t tt = ggml_internal_get_type_traits(t->type);
    size_t bs = ggml_blck_size(t->type);
    std::vector<float> vq(ggml_blck_size(t->type));
    bool quantized = ggml_is_quantized(t->type);

    // access elements by index to avoid gaps in views
    for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
        for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
            for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
                for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
                    size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
                    if (t->type == GGML_TYPE_F16) {
                        tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_BF16) {
                        tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_F32) {
                        tv.push_back(*(float *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I32) {
                        tv.push_back((float)*(int32_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I16) {
                        tv.push_back((float)*(int16_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I8) {
                        tv.push_back((float)*(int8_t *) &buf[i]);
                    } else if (quantized) {
                        tt.to_float(&buf[i], vq.data(), bs);
                        tv.insert(tv.end(), vq.begin(), vq.end());
                    } else {
                        GGML_ABORT("fatal error");
                    }
                }
            }
        }
    }

    return tv;
}
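
// The byte offset above is computed from the tensor's strides (nb) instead of
// assuming a contiguous layout, so tensor_to_float also reads permuted and
// transposed views correctly. A minimal sketch of the same arithmetic for a
// single f32 element (illustrative only; get_f32 is not part of this file):
#if 0
static float get_f32(const ggml_tensor * t, const uint8_t * buf,
                     int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
    // for f32, nb[0] == sizeof(float); for quantized types, nb[0] is the byte
    // size of one block, which is why the loop above steps i0 by ggml_blck_size
    size_t off = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0*t->nb[0];
    return *(const float *) &buf[off];
}
#endif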

/*
static double cosine_similarity(const float * v1, const float * v2, size_t n) {
    double dot = 0.0;
    double mag1 = 0.0;
    double mag2 = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return -1.0f;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        dot  += v1[i]*v2[i];
        mag1 += v1[i]*v1[i];
        mag2 += v2[i]*v2[i];
    }

    return dot/sqrt(mag1*mag2);
}

static float distance(const float * v1, const float * v2, size_t n) {
    double d = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return INFINITY;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        d += (v1[i] - v2[i])*(v1[i] - v2[i]);
    }

    return sqrt(d);
}

static float vec_len(const float * v, size_t n) {
    double d = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v[i])) {
            return INFINITY;
        }
        if (std::isinf(v[i])) {
            continue;
        }
        d += v[i]*v[i];
    }

    return sqrt(d);
}
*/

// normalized mean squared error = mse(a, b) / mse(a, 0)
static double nmse(const float * a, const float * b, size_t n) {
    double mse_a_b = 0.0;
    double mse_a_0 = 0.0;

    for (size_t i = 0; i < n; i++) {
        float a_i = a[i];
        float b_i = b[i];

        mse_a_b += (a_i - b_i) * (a_i - b_i);
        mse_a_0 += a_i * a_i;
    }

    return mse_a_b / mse_a_0;
}
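
// Worked example (illustrative only): for a = {1, 2} and b = {1, 1},
// mse(a, b) = (1-1)^2 + (2-1)^2 = 1 and mse(a, 0) = 1^2 + 2^2 = 5, so
// nmse = 1/5 = 0.2. Normalizing by mse(a, 0) makes the error threshold
// scale-invariant: scaling both tensors by a constant leaves nmse unchanged.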

// utils for printing the variables of the test cases
#define VAR_TO_STR(x) (#x "=" + var_to_str(x))

template<typename T>
static std::string var_to_str(const T & x) {
    return std::to_string(x);
}

template<typename T, size_t N>
static std::string var_to_str(const T (&x)[N]) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

template<typename T, size_t N>
static std::string var_to_str(const std::array<T, N> & x) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

//static std::string var_to_str(ggml_unary_op unary_op) {
//    return ggml_unary_op_name(unary_op);
//}

static std::string var_to_str(ggml_type type) {
    return ggml_type_name(type);
}

static std::string var_to_str(ggml_op_pool pool) {
    switch (pool) {
        case GGML_OP_POOL_AVG: return "avg";
        case GGML_OP_POOL_MAX: return "max";
        default:               return std::to_string(pool);
    }
}

#define VARS_TO_STR1(a)                                   VAR_TO_STR(a)
#define VARS_TO_STR2(a, b)                                VAR_TO_STR(a) + "," + VAR_TO_STR(b)
#define VARS_TO_STR3(a, b, c)                             VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
#define VARS_TO_STR4(a, b, c, d)                          VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
#define VARS_TO_STR5(a, b, c, d, e)                       VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
#define VARS_TO_STR6(a, b, c, d, e, f)                    VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
#define VARS_TO_STR7(a, b, c, d, e, f, g)                 VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
#define VARS_TO_STR8(a, b, c, d, e, f, g, h)              VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i)           VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j)       VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
#define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k)    VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
#define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
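
// Example expansion (illustrative only): with type = GGML_TYPE_F32 and
// ne = {10, 10, 10, 10}, VARS_TO_STR2(type, ne) expands to
// VAR_TO_STR(type) + "," + VAR_TO_STR(ne) and yields the string
// "type=f32,ne=[10,10,10,10]" via the var_to_str overloads above.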

#ifdef GGML_USE_SYCL
static bool inline _isinf(float f) {
    return (*(uint32_t *)&f & 0x7fffffff) == 0x7f800000;
}
#else
static bool inline _isinf(float f) { return std::isinf(f); }
#endif
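
// The SYCL variant above tests the IEEE-754 single-precision bit pattern
// directly: masking with 0x7fffffff clears the sign bit, and 0x7f800000 is
// the encoding of infinity (exponent all ones, mantissa zero), so both +inf
// and -inf match. Presumably this sidesteps std::isinf being unreliable
// under that toolchain's math flags.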

// accept FLT_MAX as infinity
static bool isinf_or_max(float f) {
    return _isinf(f) || f == FLT_MAX || f == -FLT_MAX;
}

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

enum test_mode {
    MODE_TEST,
    MODE_PERF,
};

struct test_case {
    virtual ~test_case() {}

    virtual std::string op_desc(ggml_tensor * t) {
        return ggml_op_desc(t);
    }

    virtual std::string vars() {
        return "";
    }

    virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;

    virtual double max_nmse_err() {
        return 1e-7;
    }

    virtual void initialize_tensors(ggml_context * ctx) {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t);
        }
    }

    virtual size_t op_size(ggml_tensor * t) {
        size_t size = ggml_nbytes(t);
        // add source tensors
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            if (t->src[i] != NULL) {
                size += ggml_nbytes(t->src[i]);
            }
        }
        return size;
    }

    ggml_cgraph * gf = nullptr;

    static const int sentinel_size = 1024;

    test_mode mode;

    std::vector<ggml_tensor *> sentinels;

    void add_sentinel(ggml_context * ctx) {
        if (mode == MODE_PERF) {
            return;
        }
        ggml_tensor * sentinel = ::ggml_new_tensor_1d(ctx, GGML_TYPE_F32, sentinel_size);
        ggml_format_name(sentinel, "sent_%zu", sentinels.size());
        sentinels.push_back(sentinel);
    }

    // hijack ggml_new_tensor to add sentinels after each tensor to check for overflows in the backend
    ggml_tensor * ggml_new_tensor(ggml_context * ctx, ggml_type type, int n_dims, const int64_t * ne) {
        ggml_tensor * t = ::ggml_new_tensor(ctx, type, n_dims, ne);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_1d(ggml_context * ctx, ggml_type type, int64_t ne0) {
        ggml_tensor * t = ::ggml_new_tensor_1d(ctx, type, ne0);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_2d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1) {
        ggml_tensor * t = ::ggml_new_tensor_2d(ctx, type, ne0, ne1);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_3d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) {
        ggml_tensor * t = ::ggml_new_tensor_3d(ctx, type, ne0, ne1, ne2);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
        ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
        add_sentinel(ctx);
        return t;
    }
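
    // Because test cases call these member wrappers rather than the global
    // ggml functions, every tensor they create is followed by a 1024-float
    // sentinel tensor, which the allocator typically places adjacent in the
    // backend buffer. If a kernel writes past the end of its output, the
    // damage lands in a sentinel and the comparison callback in eval() below
    // reports a mismatch.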

    bool eval(ggml_backend_t backend1, ggml_backend_t backend2, const char * op_name) {
        mode = MODE_TEST;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        gf = ggml_new_graph(ctx);

        // pre-graph sentinel
        add_sentinel(ctx);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if the backends support the ops
        bool supported = true;
        for (ggml_backend_t backend : {backend1, backend2}) {
            for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
                if (!ggml_backend_supports_op(backend, t)) {
                    printf("not supported [%s] ", ggml_backend_name(backend));
                    supported = false;
                    break;
                }
            }
        }
        if (!supported) {
            printf("\n");
            ggml_free(ctx);
            return true;
        }

        // post-graph sentinel
        add_sentinel(ctx);

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
        if (buf == NULL) {
            printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
            ggml_free(ctx);
            return false;
        }

        // build graph
        ggml_build_forward_expand(gf, out);

        // add sentinels as graph nodes so that they are checked in the callback
        for (ggml_tensor * sentinel : sentinels) {
            gf->nodes[gf->n_nodes++] = sentinel;
        }

        // randomize tensors
        initialize_tensors(ctx);

        // compare
        struct callback_userdata {
            bool   ok;
            double max_err;
            ggml_backend_t backend1;
            ggml_backend_t backend2;
        };

        callback_userdata ud {
            true,
            max_nmse_err(),
            backend1,
            backend2
        };

        auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
            callback_userdata * ud = (callback_userdata *) user_data;
            const char * bn1 = ggml_backend_name(ud->backend1);
            const char * bn2 = ggml_backend_name(ud->backend2);

            if (t1->op == GGML_OP_NONE) {
                // sentinels must be unchanged
                std::vector<uint8_t> t1_data(ggml_nbytes(t1));
                std::vector<uint8_t> t2_data(ggml_nbytes(t2));
                ggml_backend_tensor_get(t1, t1_data.data(), 0, ggml_nbytes(t1));
                ggml_backend_tensor_get(t2, t2_data.data(), 0, ggml_nbytes(t2));

                if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
                    printf("sentinel mismatch: %s ", t1->name);
                    ud->ok = false;
                    return true;
                }
            }

            std::vector<float> f1 = tensor_to_float(t1);
            std::vector<float> f2 = tensor_to_float(t2);

            for (size_t i = 0; i < f1.size(); i++) {
                // check for nans
                if (std::isnan(f1[i]) || std::isnan(f2[i])) {
                    printf("[%s] NaN at index %zu (%s=%f %s=%f) ", ggml_op_desc(t1), i, bn1, f1[i], bn2, f2[i]);
                    ud->ok = false;
                    return true;
                }
                // check for infs: both must be inf of the same sign, or both must be finite
                if (isinf_or_max(f1[i]) || isinf_or_max(f2[i])) {
                    if (isinf_or_max(f1[i]) && isinf_or_max(f2[i])) {
                        if (std::signbit(f1[i]) != std::signbit(f2[i])) {
                            printf("[%s] inf sign mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                            ud->ok = false;
                            return true;
                        }
                    } else {
                        printf("[%s] inf mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                        ud->ok = false;
                        return true;
                    }
                }
            }

            double err = nmse(f1.data(), f2.data(), f1.size());
            if (err > ud->max_err) {
                printf("[%s] NMSE = %.9f > %.9f ", ggml_op_desc(t1), err, ud->max_err);
                //for (int i = 0; i < (int) f1.size(); i++) {
                //    printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
                //}
                //printf("\n");
                //exit(1);
                ud->ok = false;
            }

            return true;

            GGML_UNUSED(index);
        };

        const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);

        if (!cmp_ok) {
            printf("compare failed ");
        }

        ggml_backend_buffer_free(buf);
        ggml_free(ctx);

        if (ud.ok && cmp_ok) {
            printf("\033[1;32mOK\033[0m\n");
            return true;
        }

        printf("\033[1;31mFAIL\033[0m\n");
        return false;
    }

    bool eval_perf(ggml_backend_t backend, const char * op_name) {
        mode = MODE_PERF;

        static const size_t graph_nodes = 8192;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        int len = printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if backends support op
        if (!ggml_backend_supports_op(backend, out)) {
            printf("not supported\n");
            ggml_free(ctx);
            return true;
        }

        // align while also leaving some margin for variations in parameters
        int align = 20;
        int last  = (len + align - 1) / align * align;
        if (last - len < 5) {
            last += align;
        }
        last = std::max(last, 60);
        printf("%*s", last - len, "");

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
        if (buf == NULL) {
            printf("failed to allocate tensors\n");
            ggml_free(ctx);
            return false;
        }

        // randomize tensors
        initialize_tensors(ctx);

        // build graph
        ggml_cgraph * gf = ggml_new_graph_custom(ctx, graph_nodes, false);
        ggml_build_forward_expand(gf, out);

        // warmup run
        ggml_backend_graph_compute(backend, gf);

        // duplicate the op
        size_t target_size = ggml_backend_is_cpu(backend) ? 1ULL << 33 : 1ULL << 35; // 8 GB CPU, 32 GB GPU
        int n_runs = std::min((size_t)gf->size - gf->n_nodes, target_size / op_size(out)) + 1;
        for (int i = 1; i < n_runs; i++) {
            gf->nodes[gf->n_nodes++] = out;
        }

        // calculate memory
        size_t mem = n_runs * op_size(out);
        auto tensor_op_size = [](ggml_tensor * t) {
            size_t size = ggml_nbytes(t);
            // add source tensors
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                if (t->src[i] != NULL) {
                    size += ggml_nbytes(t->src[i]);
                }
            }
            return size;
        };
        for (int i = 0; i < gf->n_nodes; i++) {
            if (ggml_is_view_op(gf->nodes[i]->op) || gf->nodes[i] == out) {
                continue;
            }
            mem += tensor_op_size(gf->nodes[i]);
        }

        // run
        ggml_backend_synchronize(backend);

        int64_t start_time = ggml_time_us();
        ggml_backend_graph_compute(backend, gf);
        ggml_backend_synchronize(backend);
        int64_t end_time = ggml_time_us();
        double time_us = end_time - start_time;

        printf(" %5d runs - %8.2f us/run - %8zu kB/run - \033[1;34m%7.2f GB/s\033[0m\n",
            n_runs,
            time_us / n_runs,
            op_size(out) / 1024,
            mem / (time_us/1e6) / 1024.0 / 1024.0 / 1024.0);

        ggml_backend_buffer_free(buf);
        ggml_free(ctx);

        return true;
    }
};
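
// A minimal sketch of how a test_case is driven (illustrative only; the real
// driver in this file enumerates many cases and backends, and demo_run_one is
// not part of it). Comparing a backend against itself as done here should
// always pass:
#if 0
static bool demo_run_one() {
    ggml_backend_t b1 = ggml_backend_cpu_init();
    ggml_backend_t b2 = ggml_backend_cpu_init();

    test_mul_mat tc(GGML_TYPE_F32, GGML_TYPE_F32, 32, 32, 32, {1, 1}, {1, 1});
    bool ok = tc.eval(b1, b2, /*op_name=*/nullptr); // nullptr = do not filter by op

    ggml_backend_free(b1);
    ggml_backend_free(b2);
    return ok;
}
#endif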

// GGML_OP_UNARY
struct test_unary : public test_case {
    const ggml_unary_op op;
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int v; // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, v);
    }

    test_unary(ggml_unary_op op,
            ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {128, 10, 10, 10},
            int v = 0)
        : op(op), type(type), ne_a(ne_a), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }

        ggml_tensor * out = ggml_unary(ctx, a, op);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            // test extended range of values to check for NaNs in GELU
            init_tensor_uniform(t, -150.f, 150.f);
        }
    }
};

// GGML_OP_GET_ROWS
struct test_get_rows : public test_case {
    const ggml_type type;
    const int n; // cols
    const int m; // rows
    const int r; // rows to get
    const int b; // batch size
    const bool v; // view (non-contiguous src1)

    std::string vars() override {
        return VARS_TO_STR6(type, n, m, r, b, v);
    }

    test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
        : type(type), n(n), m(m), r(r), b(b), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * in = ggml_new_tensor_3d(ctx, type, n, m, b);
        ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
        if (v) {
            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
        }
        ggml_tensor * out = ggml_get_rows(ctx, in, rows);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // rows
                std::vector<int> data(r*b);
                for (int i = 0; i < r*b; i++) {
                    data[i] = rand() % m;
                }
                ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
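
// Illustrative example of the op under test: with in of shape [n=4, m=3]
// (three rows of four columns) and rows = {2, 0}, ggml_get_rows returns a
// [4, 2] tensor holding row 2 of in followed by row 0.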

// GGML_OP_REPEAT
struct test_repeat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 2;
    }

    test_repeat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            std::array<int, 4> nr = {2, 2, 2, 2})
        : type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * target = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_repeat(ctx, src, target);
        return out;
    }
};

// GGML_OP_DUP
struct test_dup : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> permute;
    bool _use_permute;

    std::string vars() override {
        std::string v = VARS_TO_STR2(type, ne);
        if (_use_permute) v += "," + VAR_TO_STR(permute);
        return v;
    }

    test_dup(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 20, 1},
            std::array<int64_t, 4> permute = {0, 0, 0, 0})
        : type(type), ne(ne), permute(permute),
            _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        if (_use_permute) {
            src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
        }
        ggml_tensor * out = ggml_dup(ctx, src);
        return out;
    }
};

// GGML_OP_CPY
struct test_cpy : public test_case {
    const ggml_type type_src;
    const ggml_type type_dst;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> permute;
    bool _src_use_permute;

    std::string vars() override {
        return VARS_TO_STR4(type_src, type_dst, ne, permute);
    }

    double max_nmse_err() override {
        return 1e-6;
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
    }

    test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1},
            std::array<int64_t, 4> permute = {0, 0, 0, 0})
        : type_src(type_src), type_dst(type_dst), ne(ne), permute(permute),
            _src_use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
        if (_src_use_permute) {
            src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
        }
        ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, src->ne);
        ggml_tensor * out = ggml_cpy(ctx, src, dst);
        return out;
    }
};

// GGML_OP_CONT
struct test_cont : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cont(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        src = ggml_transpose(ctx, src);
        ggml_tensor * out = ggml_cont(ctx, src);
        return out;
    }
};

// GGML_OP_ADD
// GGML_OP_MUL
// GGML_OP_DIV
struct test_bin_bcast : public test_case {
    using op_t = ggml_tensor * (*) (ggml_context *, ggml_tensor *, ggml_tensor *);
    op_t op;
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 3;
    }

    test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 1, 1},
            std::array<int, 4> nr = {1, 2, 1, 1})
        : op(op), type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = op(ctx, a, b);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (op == ggml_div) {
                // avoid division by zero
                init_tensor_uniform(t, 1.0f, 2.0f);
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
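
// Illustrative example of the broadcast under test: with ne = {10, 10, 1, 1}
// and nr = {1, 2, 1, 1}, a has shape [10, 20, 1, 1] while b keeps shape
// [10, 10, 1, 1]; b is repeated twice along dim 1 so that op(a, b) is
// evaluated elementwise over a's full shape.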

// GGML_OP_SCALE
struct test_scale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float scale;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, scale);
    }

    test_scale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float scale = 2.0f)
        : type(type), ne(ne), scale(scale) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_scale(ctx, a, scale);
        return out;
    }
};

// GGML_OP_NORM
struct test_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_norm(ctx, a, eps);
        return out;
    }
};

// GGML_OP_RMS_NORM
struct test_rms_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_rms_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_rms_norm(ctx, a, eps);
        return out;
    }
};

// GGML_OP_SSM_CONV
struct test_ssm_conv : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const std::array<int64_t, 4> ne_b;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, ne_b);
    }

    test_ssm_conv(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 1},
            std::array<int64_t, 4> ne_b = {3, 3, 1, 1})
        : type(type), ne_a(ne_a), ne_b(ne_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        ggml_tensor * out = ggml_ssm_conv(ctx, a, b);
        return out;
    }
};

// GGML_OP_SSM_SCAN
struct test_ssm_scan : public test_case {
    const ggml_type type;
    const int64_t d_state;
    const int64_t d_inner;
    const int64_t n_seq_tokens;
    const int64_t n_seqs;

    std::string vars() override {
        return VARS_TO_STR5(type, d_state, d_inner, n_seq_tokens, n_seqs);
    }

    test_ssm_scan(ggml_type type = GGML_TYPE_F32,
            int64_t d_state = 32, int64_t d_inner = 32, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
        : type(type), d_state(d_state), d_inner(d_inner), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * s   = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner,      n_seqs, 1 }.data());
        ggml_tensor * x   = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
        ggml_tensor * dt  = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
        ggml_tensor * A   = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner,      1,      1 }.data());
        ggml_tensor * B   = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
        ggml_tensor * C   = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
        ggml_tensor * out = ggml_ssm_scan(ctx, s, x, dt, A, B, C);
        return out;
    }
};

// GGML_OP_MUL_MAT
struct test_mul_mat : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int64_t m;
    const int64_t n;
    const int64_t k;
    const std::array<int64_t, 2> bs; // dims 3 and 4
    const std::array<int64_t, 2> nr; // repeat in dims 3 and 4

    std::string vars() override {
        return VARS_TO_STR7(type_a, type_b, m, n, k, bs, nr);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[0]) * n * nr[0] * nr[1];
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int64_t m = 32, int64_t n = 32, int64_t k = 32,
            std::array<int64_t, 2> bs = {10, 10},
            std::array<int64_t, 2> nr = {2, 2})
        : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0],       bs[1]);
        ggml_tensor * b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]);
        ggml_tensor * out = ggml_mul_mat(ctx, a, b);
        return out;
    }
};
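
// Shape convention (matching the C^T = A * B^T comment above): ggml_mul_mat
// contracts a = [k, m, ...] with b = [k, n, ...] over the first dimension and
// yields out = [m, n, ...]; bs sets the batch sizes in dims 3 and 4, and nr
// broadcasts a across b's additional batches.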

// GGML_OP_MUL_MAT_ID
struct test_mul_mat_id : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int n_mats;
    const int n_used;
    const bool b; // broadcast b matrix
    const int64_t m;
    const int64_t n;
    const int64_t k;

    std::string vars() override {
        return VARS_TO_STR8(type_a, type_b, n_mats, n_used, b, m, n, k);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[2]) * n;
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int n_mats = 8, int n_used = 2, bool b = false,
            int64_t m = 32, int64_t n = 32, int64_t k = 32)
        : type_a(type_a), type_b(type_b), n_mats(n_mats), n_used(n_used), b(b),
            m(m), n(n), k(k) {
        GGML_ASSERT(n_used <= n_mats);
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats);
        ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n);
        if (n_used != n_mats) {
            ids = ggml_view_2d(ctx, ids, n_used, n, ids->nb[1], 0);
        }
        ggml_tensor * b = ggml_new_tensor_3d(ctx, type_b, k, this->b ? 1 : n_used, n);
        ggml_tensor * out = ggml_mul_mat_id(ctx, as, b, ids);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // ids
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<int32_t> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i % n_mats;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(int32_t));
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_SQR
struct test_sqr : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqr(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sqr(ctx, a);
        return out;
    }
};

// GGML_OP_SQRT
struct test_sqrt : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqrt(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sqrt(ctx, a);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        // fill with positive values
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, 0.0f, 100.0f);
        }
    }
};

// GGML_OP_SIN
struct test_sin : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sin(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sin(ctx, a);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -100.0f, 100.0f);
        }
    }
};

// GGML_OP_COS
struct test_cos : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cos(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_cos(ctx, a);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -100.0f, 100.0f);
        }
    }
};

// GGML_OP_CLAMP
struct test_clamp : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float min;
    float max;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, min, max);
    }

    test_clamp(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float min = -0.5f, float max = 0.5f)
        : type(type), ne(ne), min(min), max(max) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_clamp(ctx, a, min, max);
        return out;
    }
};

// GGML_OP_DIAG_MASK_INF
struct test_diag_mask_inf : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int n_past;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, n_past);
    }

    test_diag_mask_inf(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            int n_past = 5)
        : type(type), ne(ne), n_past(n_past) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_diag_mask_inf(ctx, a, n_past);
        return out;
    }
};

// GGML_OP_SOFT_MAX
struct test_soft_max : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const bool mask;
    const float scale;
    const float max_bias;

    std::string vars() override {
        return VARS_TO_STR5(type, ne, mask, scale, max_bias);
    }

    // the 1024 test with bias occasionally fails:
    // SOFT_MAX(type=f32,ne=[1024,16,1,1],mask=1,scale=1.000000,max_bias=8.000000): [SOFT_MAX] NMSE = 0.000000103 > 0.000000100 FAIL
    virtual double max_nmse_err() override {
        return 1e-6;
    }

    test_soft_max(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            bool mask = false,
            float scale = 1.0f,
            float max_bias = 0.0f)
        : type(type), ne(ne), mask(mask), scale(scale), max_bias(max_bias) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * mask = nullptr;
        if (this->mask) {
            mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]);
        }
        ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
        return out;
    }
};

// GGML_OP_ROPE
struct test_rope : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int n_dims;
    int mode;
    int n_ctx; // used to generate positions
    float fs;  // freq_scale
    float ef;  // ext_factor
    float af;  // attn_factor
    bool ff;   // use frequency factors
    int v;     // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR10(type, ne_a, n_dims, mode, n_ctx, fs, ef, af, ff, v);
    }

    test_rope(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 1},
            int n_dims = 10, int mode = 0, int n_ctx = 512, float fs = 1.0f, float ef = 0.0f, float af = 0.0f, bool ff = false, int v = 0)
        : type(type), ne_a(ne_a), n_dims(n_dims), mode(mode), n_ctx(n_ctx), fs(fs), ef(ef), af(af), ff(ff), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }

        ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2]);
        ggml_tensor * freq = ff ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_dims/2) : nullptr;
        ggml_tensor * out = ggml_rope_ext(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(ne_a[2]);
                for (int i = 0; i < ne_a[2]; i++) {
                    data[i] = rand() % n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, ne_a[2] * sizeof(int));
            } else {
                if (t->ne[0] == n_dims/2) {
                    // frequency factors in the range [0.9f, 1.1f]
                    init_tensor_uniform(t, 0.9f, 1.1f);
                } else {
                    init_tensor_uniform(t);
                }
            }
        }
    }
};
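
// Note on the inputs above: pos holds one int32 position per slice along
// dim 2 (ne_a[2] entries, each sampled in [0, n_ctx)), and when ff is set a
// frequency-factors tensor of n_dims/2 values near 1.0 perturbs the
// per-dimension rotation frequencies.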

// GGML_OP_POOL2D
struct test_pool2d : public test_case {
    enum ggml_op_pool pool_type;
    const ggml_type type_input;
    const std::array<int64_t, 4> ne_input;
    // kernel size
    const int k0;
    const int k1;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;

    std::string vars() override {
        return VARS_TO_STR9(pool_type, type_input, ne_input, k0, k1, s0, s1, p0, p1);
    }

    test_pool2d(ggml_op_pool pool_type = GGML_OP_POOL_AVG,
            ggml_type type_input = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
            int k0 = 3, int k1 = 3,
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1)
        : pool_type(pool_type), type_input(type_input), ne_input(ne_input), k0(k0), k1(k1), s0(s0), s1(s1), p0(p0), p1(p1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_tensor * out = ggml_pool_2d(ctx, input, pool_type, k0, k1, s0, s1, p0, p1);
        return out;
    }
};

// GGML_OP_CONV_TRANSPOSE_1D
struct test_conv_transpose_1d : public test_case {
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    const int s0; // stride
    const int p0; // padding
    const int d0; // dilation

    std::string vars() override {
        return VARS_TO_STR5(ne_input, ne_kernel, s0, p0, d0);
    }

    test_conv_transpose_1d(std::array<int64_t, 4> ne_input = {197, 32, 1, 1}, // [input_width, input_height, input_channels, 1]
            std::array<int64_t, 4> ne_kernel = {16, 32, 32, 1},               // [kernel_width, kernel_height, input_channels, 1]
            int s0 = 1, int p0 = 0, int d0 = 1)
        : ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), p0(p0), d0(d0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data());
        ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data());
        ggml_tensor * out = ggml_conv_transpose_1d(ctx, kernel, input, s0, p0, d0);
        return out;
    }
};
// GGML_OP_IM2COL
struct test_im2col : public test_case {
    const ggml_type type_input;
    const ggml_type type_kernel;
    const ggml_type dst_type;
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;
    // dilation
    const int d0;
    const int d1;
    // mode
    const bool is_2D;

    std::string vars() override {
        return VARS_TO_STR12(type_input, type_kernel, dst_type, ne_input, ne_kernel, s0, s1, p0, p1, d0, d1, is_2D);
    }

    test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16, ggml_type dst_type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
            std::array<int64_t, 4> ne_kernel = {3, 3, 3, 1}, // [kernel_width, kernel_height, input_channels, 1]
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1,
            int d0 = 1, int d1 = 1,
            bool is_2D = true)
        : type_input(type_input), type_kernel(type_kernel), dst_type(dst_type), ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), is_2D(is_2D) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data());
        ggml_tensor * out = ggml_im2col(ctx, kernel, input, s0, s1, p0, p1, d0, d1, is_2D, dst_type);
        return out;
    }
};
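// im2col unrolls each convolution window of the input into one row of the
// destination, so that the convolution proper reduces to a single mat-mul with
// the flattened kernel; this test only exercises the unrolling step.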
// GGML_OP_CONCAT
struct test_concat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int64_t ne_b_d;
    const int dim;
    const int v; // view (1 << 0: non-cont a, 1 << 1: non-cont b)

    std::string vars() override {
        return VARS_TO_STR5(type, ne_a, ne_b_d, dim, v);
    }

    test_concat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 10},
            int64_t ne_b_d = 10,
            int dim = 2, int v = 0)
        : type(type), ne_a(ne_a), ne_b_d(ne_b_d), dim(dim), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        auto ne_b = ne_a;
        ne_b[dim] = ne_b_d;
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }
        ggml_tensor * b;
        if (v & 2) {
            auto ne = ne_b; ne[0] *= 3; ne[1] *= 2; ne[2] *= 4;
            b = ggml_new_tensor(ctx, type, 4, ne.data());
            b = ggml_view_4d(ctx, b, ne_b[0], ne_b[1], ne_b[2], ne_b[3], b->nb[1], b->nb[2], b->nb[3], 0);
        } else {
            b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        }
        ggml_tensor * out = ggml_concat(ctx, a, b, dim);
        return out;
    }
};
// GGML_OP_ARGSORT
struct test_argsort : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    ggml_sort_order order;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, order);
    }

    test_argsort(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {16, 10, 10, 10},
            ggml_sort_order order = GGML_SORT_ORDER_ASC)
        : type(type), ne(ne), order(order) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_argsort(ctx, a, order);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // indices
                std::vector<int> data(ggml_nelements(t));
                for (int i = 0; i < ggml_nelements(t); i++) {
                    data[i] = rand();
                }
                std::shuffle(data.begin(), data.end(), rng);
                ggml_backend_tensor_set(t, data.data(), 0, ne[0]*ne[1]*ne[2]*ne[3] * sizeof(int));
            } else if (t->type == GGML_TYPE_F32) {
                // initialize with unique values to avoid ties
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<float> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
                }
            } else {
                GGML_ABORT("fatal error");
            }
        }
    }
};
// GGML_OP_SUM_ROWS
struct test_sum_rows : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sum_rows(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sum_rows(ctx, a);
        return out;
    }
};
// GGML_OP_UPSCALE
struct test_upscale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t scale_factor;
    const bool transpose;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, scale_factor, transpose);
    }

    test_upscale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {512, 512, 3, 1},
            int32_t scale_factor = 2, bool transpose = false)
        : type(type), ne(ne), scale_factor(scale_factor), transpose(transpose) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        if (transpose) {
            a = ggml_transpose(ctx, a);
        }
        ggml_tensor * out = ggml_upscale(ctx, a, scale_factor);
        return out;
    }
};
// GGML_OP_UPSCALE (ext)
struct test_upscale_ext : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> ne_tgt;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, ne_tgt);
    }

    test_upscale_ext(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {2, 5, 7, 11},
            std::array<int64_t, 4> ne_tgt = {5, 7, 11, 13})
        : type(type), ne(ne), ne_tgt(ne_tgt) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1], ne_tgt[2], ne_tgt[3]);
        return out;
    }
};
// GGML_OP_GROUP_NORM
struct test_group_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t num_groups;
    const float eps;

    std::string vars() override {
        // include eps so that test cases differing only in eps print distinctly
        return VARS_TO_STR4(type, ne, num_groups, eps);
    }

    test_group_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 64, 320, 1},
            int32_t num_groups = 32,
            float eps = 1e-6f)
        : type(type), ne(ne), num_groups(num_groups), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_group_norm(ctx, a, num_groups, eps);
        return out;
    }
};
// GGML_OP_ACC
struct test_acc : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const std::array<int64_t, 4> ne_b;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, ne_b);
    }

    test_acc(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {1024, 577, 1, 1},
            std::array<int64_t, 4> ne_b = {1024, 576, 1, 1})
        : type(type), ne_a(ne_a), ne_b(ne_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        ggml_tensor * out = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], b->nb[1]);
        return out;
    }
};
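// note: ggml_acc takes strides (nb1, nb2, nb3) and a byte offset; passing
// b->nb[1] as the offset starts the accumulation one row into a, which is why
// ne_a has one more row (577) than ne_b (576) in the defaults above.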
// GGML_OP_PAD
struct test_pad : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int pad_0;
    const int pad_1;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
    }

    test_pad(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {512, 512, 1, 1},
            int pad_0 = 1, int pad_1 = 1)
        : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_pad(ctx, a, pad_0, pad_1, 0, 0);
        return out;
    }
};
// GGML_OP_ARANGE
struct test_arange : public test_case {
    const ggml_type type;
    const float start;
    const float stop;
    const float step;

    std::string vars() override {
        return VARS_TO_STR4(type, start, stop, step);
    }

    test_arange(ggml_type type = GGML_TYPE_F32,
            float start = 0.f, float stop = 10.f, float step = 1.f)
        : type(type), start(start), stop(stop), step(step) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * out = ggml_arange(ctx, start, stop, step);
        return out;
    }
};
// GGML_OP_TIMESTEP_EMBEDDING
struct test_timestep_embedding : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int dim;
    const int max_period;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, dim, max_period);
    }

    test_timestep_embedding(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {2, 1, 1, 1},
            int dim = 320, int max_period = 10000)
        : type(type), ne_a(ne_a), dim(dim), max_period(max_period) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_timestep_embedding(ctx, a, dim, max_period);
        return out;
    }
};
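// ggml_timestep_embedding produces the sinusoidal timestep embeddings used by
// diffusion models: each scalar timestep t is expanded to dim values built from
// sin/cos of t scaled by a geometric progression of frequencies derived from
// max_period.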
// GGML_OP_LEAKY_RELU
struct test_leaky_relu : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const float negative_slope;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, negative_slope);
    }

    test_leaky_relu(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 10},
            float negative_slope = 0.1f)
        : type(type), ne_a(ne_a), negative_slope(negative_slope) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_leaky_relu(ctx, a, negative_slope, true);
        return out;
    }
};
// GGML_OP_FLASH_ATTN_EXT
struct test_flash_attn_ext : public test_case {
    const int64_t hs; // head size
    const int64_t nh; // num heads
    const int64_t kv; // kv size
    const int64_t nb; // batch size

    const bool mask; // use mask

    const float max_bias; // ALiBi
    const float logit_softcap; // Gemma 2

    const ggml_type type_KV;

    std::string vars() override {
        return VARS_TO_STR8(hs, nh, kv, nb, mask, max_bias, logit_softcap, type_KV);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8,
            bool mask = true, float max_bias = 0.0f, float logit_softcap = 0.0f,
            ggml_type type_KV = GGML_TYPE_F16)
        : hs(hs), nh(nh), kv(kv), nb(nb), mask(mask), max_bias(max_bias), logit_softcap(logit_softcap), type_KV(type_KV) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        const int64_t hs_padded = GGML_PAD(hs, ggml_blck_size(type_KV));

        ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs_padded, nb, nh, 1);
        ggml_tensor * k = ggml_new_tensor_4d(ctx, type_KV, hs_padded, kv, nh, 1);
        ggml_tensor * v = ggml_new_tensor_4d(ctx, type_KV, hs_padded, kv, nh, 1);
        ggml_tensor * m = mask ? ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1) : nullptr;
        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias, logit_softcap);
        return out;
    }
};
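// the scale passed to ggml_flash_attn_ext above (1.0f/sqrtf(hs)) is the
// standard attention scaling; a non-zero logit_softcap squashes the scaled KQ
// logits through softcap*tanh(x/softcap) (the Gemma 2 scheme) before the softmax.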
// GGML_OP_CROSS_ENTROPY_LOSS
struct test_cross_entropy_loss : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cross_entropy_loss(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * logits = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * labels = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_cross_entropy_loss(ctx, logits, labels);
        return out;
    }
};
enum llm_norm_type {
    LLM_NORM,
    LLM_NORM_RMS,
};

struct llama_hparams {
    uint32_t n_vocab;
    uint32_t n_embd;
    uint32_t n_head;
    uint32_t n_head_kv;
    static constexpr uint32_t n_layer = 1;
    uint32_t n_rot;
    uint32_t n_embd_head; // dimension of values (d_v)
    uint32_t n_ff;

    float f_norm_eps;
    float f_norm_rms_eps;

    // cparams
    static constexpr uint32_t n_ctx = 512; // user-specified context size
    static constexpr uint32_t n_ctx_orig = n_ctx;

    // batch
    int32_t n_tokens;

    // llm_build_context
    static constexpr int32_t n_kv = 32; // size of KV cache to consider (n_kv <= n_ctx)
    static constexpr int32_t kv_head = 1; // index of where we store new KV data in the cache

    uint32_t n_embd_gqa() const { // dimension of key embeddings across all k-v heads
        return n_embd_head * n_head_kv;
    }
};
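// example: test_llama below uses n_embd_head = 100 and n_head_kv = 32, so
// n_embd_gqa() = 3200 (K/V rows as wide as the embeddings, no grouped-query
// sharing), while test_falcon uses n_head_kv = 1 (multi-query attention),
// giving n_embd_gqa() = 64.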
// LLM base class
struct test_llm : public test_case {
    llama_hparams hp;

protected:
    test_llm(llama_hparams hp)
        : hp(std::move(hp)) {
    }

public:
    struct ggml_tensor * llm_build_norm(
            struct ggml_context * ctx,
            struct ggml_tensor * cur,
            struct ggml_tensor * mw,
            struct ggml_tensor * mb,
            llm_norm_type type) {
        switch (type) {
            case LLM_NORM:     cur = ggml_norm    (ctx, cur, hp.f_norm_eps);     break;
            case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hp.f_norm_rms_eps); break;
        }
        cur = ggml_mul(ctx, cur, mw);
        if (mb) {
            cur = ggml_add(ctx, cur, mb);
        }
        return cur;
    }

    void llm_build_kv_store(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * k_cur,
            struct ggml_tensor * v_cur) {
        // compute the transposed [n_tokens, n_embd] V matrix
        struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, hp.n_embd_gqa(), hp.n_tokens));

        struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, hp.n_tokens*hp.n_embd_gqa(),
                (ggml_row_size(k_l->type, hp.n_embd_gqa()))*hp.kv_head);

        struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, v_l, hp.n_tokens, hp.n_embd_gqa(),
                (hp.n_ctx)*ggml_element_size(v_l),
                (hp.kv_head)*ggml_element_size(v_l));

        // important: storing RoPE-ed version of K in the KV cache!
        ggml_cpy(ctx, k_cur, k_cache_view);
        ggml_cpy(ctx, v_cur_t, v_cache_view);
    }
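    // K rows are appended contiguously per token, while V is stored transposed so
    // that llm_build_kqv below can view one contiguous [n_kv] slice per value
    // dimension (note the n_ctx-based strides of the V views in both functions).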
    struct ggml_tensor * llm_build_kqv(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * q_cur,
            struct ggml_tensor * kq_mask,
            float kq_scale) {
        struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);

        struct ggml_tensor * k =
            ggml_view_3d(ctx, k_l,
                    hp.n_embd_head, hp.n_kv, hp.n_head_kv,
                    ggml_row_size(k_l->type, hp.n_embd_gqa()),
                    ggml_row_size(k_l->type, hp.n_embd_head),
                    0);

        struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, 0.0f);

        // split cached v into n_head heads
        struct ggml_tensor * v =
            ggml_view_3d(ctx, v_l,
                    hp.n_kv, hp.n_embd_head, hp.n_head_kv,
                    ggml_element_size(v_l)*hp.n_ctx,
                    ggml_element_size(v_l)*hp.n_ctx*hp.n_embd_head,
                    0);

        struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
        struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
        struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, hp.n_embd_head*hp.n_head, hp.n_tokens);

        struct ggml_tensor * wo = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
        cur = ggml_mul_mat(ctx, wo, cur);

        return cur;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(hp.n_tokens);
                for (int i = 0; i < hp.n_tokens; i++) {
                    data[i] = rand() % hp.n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, hp.n_tokens * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
// Llama
struct test_llama : public test_llm {
    static constexpr float freq_base = 10000.0f;
    static constexpr float freq_scale = 1.0f;
    static constexpr float ext_factor = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast = 32.0f;
    static constexpr float beta_slow = 1.0f;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "LLAMA";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    test_llama(int n_tokens = 1)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 32,
            /*n_head_kv      =*/ 32,
            /*n_rot          =*/ 100,
            /*n_embd_head    =*/ 100,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 0.f,
            /*f_norm_rms_eps =*/ 1e-5f,
            /*n_tokens       =*/ n_tokens,
        }) {
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);

        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            ggml_tensor * attn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, inpL, attn_norm, nullptr, LLM_NORM_RMS);

            // self-attention
            {
                ggml_tensor * wq = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
                ggml_tensor * wk = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());
                ggml_tensor * wv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());

                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx, wq, cur);
                struct ggml_tensor * Kcur = ggml_mul_mat(ctx, wk, cur);
                struct ggml_tensor * Vcur = ggml_mul_mat(ctx, wv, cur);

                Qcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx, cur, inpSA);

            // feed-forward network
            ggml_tensor * ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, ffn_inp, ffn_norm, nullptr, LLM_NORM_RMS);

            ggml_tensor * ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
            ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
            ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
            struct ggml_tensor * tmp = ggml_mul_mat(ctx, ffn_up, cur);
            cur = ggml_mul_mat(ctx, ffn_gate, cur);
            cur = ggml_silu(ctx, cur);
            cur = ggml_mul(ctx, cur, tmp);
            cur = ggml_mul_mat(ctx, ffn_down, cur);
            cur = ggml_add(ctx, cur, ffn_inp);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, nullptr, LLM_NORM_RMS);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);

        return cur;
    }
};
// Falcon
struct test_falcon : public test_llm {
    static constexpr float freq_base = 10000.0f;
    static constexpr float freq_scale = 1.0f;
    static constexpr float ext_factor = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast = 32.0f;
    static constexpr float beta_slow = 1.0f;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "FALCON";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    test_falcon(int n_tokens = 1)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 50,
            /*n_head_kv      =*/ 1,
            /*n_rot          =*/ 64,
            /*n_embd_head    =*/ 64,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 1e-5f,
            /*f_norm_rms_eps =*/ 0.f,
            /*n_tokens       =*/ n_tokens,
        }) {
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);

        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            // norm
            ggml_tensor * attn_norm_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm = llm_build_norm(ctx, inpL, attn_norm_w, attn_norm_b, LLM_NORM);

            // self-attention
            {
                cur = attn_norm;

                ggml_tensor * wqkv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd + 2*hp.n_embd_gqa());

                cur = ggml_mul_mat(ctx, wqkv, cur);

                struct ggml_tensor * Qcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd,       hp.n_tokens, cur->nb[1], 0*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd + hp.n_embd_gqa())));

                Qcur = ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head,    hp.n_tokens);
                Kcur = ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens);

                // using mode = 2 for neox mode
                Qcur = ggml_rope_ext(
                    ctx, Qcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, Kcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }

            struct ggml_tensor * ffn_inp = cur;

            // feed forward
            {
                ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
                ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
                cur = attn_norm;
                cur = ggml_mul_mat(ctx, ffn_up, cur);
                cur = ggml_gelu(ctx, cur);
                cur = ggml_mul_mat(ctx, ffn_down, cur);
            }

            cur = ggml_add(ctx, cur, ffn_inp);

            cur = ggml_add(ctx, cur, inpL);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        ggml_tensor * output_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, output_norm_b, LLM_NORM);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);

        return cur;
    }
};
static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
    std::vector<std::unique_ptr<test_case>> test_cases;
    std::default_random_engine rng(0);

    const ggml_type all_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_BF16,
        GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
        GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0,
        GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
        GGML_TYPE_Q4_K, GGML_TYPE_Q5_K,
        GGML_TYPE_Q6_K,
        GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
        GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
        GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
    };

    const ggml_type base_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16,
        GGML_TYPE_Q4_0,
        GGML_TYPE_Q4_K,
        GGML_TYPE_IQ2_XXS
    };

    const ggml_type other_types[] = {
        GGML_TYPE_Q4_1,
        GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0,
        GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
        GGML_TYPE_Q5_K,
        GGML_TYPE_Q6_K,
        GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
        GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
        GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
        GGML_TYPE_BF16,
    };
    // unary ops
    for (int v : {0, 1}) {
        for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) {
            test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, { 128, 10, 10, 10 }, v));
            test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, { 7, 13, 19, 23 }, v));
        }
    }

    test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false));
    for (ggml_type type : all_types) {
        for (int b : {1, 7}) {
            for (bool v : {false, true}) {
                test_cases.emplace_back(new test_get_rows(type, 256, 5, 4, b, v));
            }
        }
    }
    for (int b : {1, 7}) {
        for (bool v : {false, true}) {
            test_cases.emplace_back(new test_get_rows(GGML_TYPE_I32, 256, 5, 4, b, v));
        }
    }

    for (ggml_type type_input : {GGML_TYPE_F32}) {
        for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) {
            for (int k0 : {1, 3}) {
                for (int k1 : {1, 3}) {
                    for (int s0 : {1, 2}) {
                        for (int s1 : {1, 2}) {
                            for (int p0 : {0, 1}) {
                                for (int p1 : {0, 1}) {
                                    test_cases.emplace_back(new test_pool2d(pool_type, type_input, {10, 10, 3, 1}, k0, k1, s0, s1, p0, p1));
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32));
    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16));
    // test cases for 1D im2col
    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
    // the sycl backend limits the task global_range to < MAX_INT
    // test cases for 2D im2col with large input W and H (occurs in stable-diffusion)
    // however, these cases need to allocate more memory, which may fail on some devices (Intel Arc A770, etc.)
    // these cases are verified (pass) on an Intel(R) Data Center GPU Max 1100 (sycl backend) and an NV A30 (cuda backend)
    // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true));
    // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true));

    test_cases.emplace_back(new test_conv_transpose_1d());
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 3, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 2, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 1, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 2, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 1, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 2, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 2, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 2}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, {10, 10, 10, 10}, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 10, 10, 10}, {1, 1, 1, 2}));

    test_cases.emplace_back(new test_dup(GGML_TYPE_F32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {0, 2, 1, 3})); // dup by rows
    test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {1, 0, 2, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {1, 0, 2, 3})); // dup dst not-contiguous
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3}));

    for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
        for (ggml_type type_dst : all_types) {
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows
        }
    }
    for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
        for (ggml_type type_dst : {GGML_TYPE_F16, GGML_TYPE_F32}) {
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
        }
    }
    test_cases.emplace_back(new test_cont());

    auto add_test_bin_bcast = [&](ggml_type type, std::array<int64_t, 4> ne, std::array<int, 4> nr) {
        for (auto op : {ggml_add, ggml_mul, ggml_div}) {
            test_cases.emplace_back(new test_bin_bcast(op, type, ne, nr));
        }
    };
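    // nr holds per-dimension repeat counts: test_bin_bcast (defined earlier in
    // this file) sizes one operand as ne and the other as ne*nr, so e.g.
    // nr = {2, 1, 1, 1} exercises broadcasting along dim 0.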
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 8, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1, 1}, {32, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 320, 320}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 2, 2, 2});

    // stable diffusion
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 16, 16, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 16, 16, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 16, 1280, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 2560, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 640, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {5120, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {640, 1, 1, 1}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {2, 1, 1, 1});
    test_cases.emplace_back(new test_scale());

    for (float eps : {1e-6f, 1e-5f, 1e-3f, 1e-1f}) {
        test_cases.emplace_back(new test_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
        test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
    }

    test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 1, 1}, {4, 1536, 1, 1}));
    test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {8, 1536, 1, 1}, {4, 1536, 1, 1}));
    test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 4, 1}, {4, 1536, 1, 1}));

    test_cases.emplace_back(new test_ssm_scan(GGML_TYPE_F32, 16, 1024, 32, 4));
#if 1
    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {2, 2}));

            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1, 1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 2}));
        }
    }
#else
    // m = a rows
    // n = b rows
    // k = cols
    std::uniform_int_distribution<> dist_m(1, 128);
    std::uniform_int_distribution<> dist_n(16, 128);
    std::uniform_int_distribution<> dist_k(1, 16);
    for (int i = 0; i < 1000; i++) {
        for (ggml_type type_a : all_types) {
            for (ggml_type type_b : {GGML_TYPE_F32}) {
                int m = dist_m(rng);
                int n = dist_n(rng);
                int k = dist_k(rng) * ggml_blck_size(type_a);
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, m, n, k, { 1, 1}, {1, 1}));
            }
        }
    }
#endif

    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32}) {
            if (ggml_blck_size(type_a) != 256) {
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, ggml_blck_size(type_a), {1, 1}, {1, 1}));
            }
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1}));
        }
    }

    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 128, { 8, 1}, {1, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 128, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 64, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 64, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 45, 128, { 8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45, 64, { 8, 1}, {4, 1}));

    // the sycl backend limits the task global_range to < MAX_INT
    // test case for the f16-to-fp32 convert kernel with a large k under fp32 compute dtype (occurs in stable-diffusion)
    // however, this case needs to allocate more memory, which may fail on some devices (Intel Arc A770, etc.)
    // this case is verified (pass) on an Intel(R) Data Center GPU Max 1100 (sycl backend) and an NV A30 (cuda backend)
    // test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F16, 512, 262144, 9216, {1, 1}, {1, 1}));
    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4, 8}) {
                for (int n_used : {1, 2, 4}) {
                    for (bool b : {false, true}) {
                        for (int n : {1, 32}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }

    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4}) {
                for (int n_used : {2}) {
                    for (bool b : {false}) {
                        for (int n : {1}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_sqr());
    test_cases.emplace_back(new test_sqrt());
    test_cases.emplace_back(new test_sin());
    test_cases.emplace_back(new test_cos());
    test_cases.emplace_back(new test_clamp());

    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10, 1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10, 10}, 5));

#if 0
    std::uniform_int_distribution<> dist_ne1(1, 50);
    int exponent = 1;
    while (exponent < (1 << 17)) {
        std::uniform_int_distribution<> dist_ne0(exponent, 2*exponent);

        for (int n = 0; n < 10; ++n) {
            int64_t ne0 = dist_ne0(rng);
            int64_t ne1 = dist_ne1(rng);
            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {ne0, ne1, 1, 1}, n/2 == 0, 0.1f, ne0 < 1000 ? 4.0f : 0.0f));
        }

        exponent <<= 1;
    }
#endif
    for (bool mask : {false, true}) {
        for (float max_bias : {0.0f, 8.0f}) {
            if (!mask && max_bias > 0.0f) continue;
            for (float scale : {1.0f, 0.1f}) {
                for (int64_t ne0 : {16, 1024}) {
                    for (int64_t ne1 : {16, 1024}) {
                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, scale, max_bias));
                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, scale, max_bias));
                    }
                }
            }
        }
    }

    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 8.0f));
    {
        bool all = true;

        for (float v : { 0, 1 }) {
            for (float fs : { 1.0f, 1.4245f }) {
                for (float ef : { 0.0f, 0.7465f }) {
                    for (float af : { 1.0f, 1.4245f }) {
                        for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
                            for (bool ff : {false, true}) { // freq_factors
                                test_cases.emplace_back(new test_rope(type, {128, 32, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 7B

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, {128, 40, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 13B
                                    test_cases.emplace_back(new test_rope(type, {128, 52, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 30B
                                    test_cases.emplace_back(new test_rope(type, {128, 64, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 65B
                                }

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, { 64, 1, 10, 1}, 64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64, 71, 10, 1}, 64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64, 8, 10, 1}, 64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 40B)
                                    test_cases.emplace_back(new test_rope(type, { 80, 32, 10, 1}, 20, 2, 512, fs, ef, af, ff, v)); // neox (stablelm)
                                    test_cases.emplace_back(new test_rope(type, { 80, 32, 10, 1}, 32, 2, 512, fs, ef, af, ff, v)); // neox (phi-2)
                                }

                                test_cases.emplace_back(new test_rope(type, { 64, 128, 10, 1}, 64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 40B)
                            }
                        }

                        all = false;
                    }
                }
            }
        }
    }
    for (int v : { 0, 1, 2, 3 }) {
        for (int dim : { 0, 1, 2, 3, }) {
            test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v));
            test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v));
        }
    }

    for (ggml_sort_order order : {GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC}) {
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {8, 1, 1, 1}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {60, 10, 10, 10}, order)); // qwen
    }

    test_cases.emplace_back(new test_sum_rows());
    test_cases.emplace_back(new test_upscale());
    test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 1 }, 2, true));
    test_cases.emplace_back(new test_upscale_ext());
    test_cases.emplace_back(new test_group_norm());
    test_cases.emplace_back(new test_acc());
    test_cases.emplace_back(new test_pad());
    test_cases.emplace_back(new test_arange());
    test_cases.emplace_back(new test_timestep_embedding());
    test_cases.emplace_back(new test_leaky_relu());
    for (int hs : { 64, 80, 128, 256, }) {
        for (bool mask : { true, false } ) {
            for (float max_bias : { 0.0f, 8.0f }) {
                if (!mask && max_bias > 0.0f) continue;
                for (float logit_softcap : {0.0f, 10.0f}) {
                    if (hs != 128 && logit_softcap != 0.0f) continue;
                    for (int nh : { 32, }) {
                        for (int kv : { 512, 1024, }) {
                            for (int nb : { 1, 2, 4, 8, }) {
                                for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) {
                                    test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, mask, max_bias, logit_softcap, type_KV));
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    test_cases.emplace_back(new test_cross_entropy_loss());

    // these tests are disabled to save execution time, but they can be handy for debugging
#if 0
    test_cases.emplace_back(new test_llama(1));
    test_cases.emplace_back(new test_llama(2));
    test_cases.emplace_back(new test_falcon(1));
    test_cases.emplace_back(new test_falcon(2));
#endif
    // run tests
    if (mode == MODE_TEST) {
        ggml_backend_t backend_cpu = ggml_backend_cpu_init();

        size_t n_ok = 0;
        for (auto & test : test_cases) {
            if (test->eval(backend, backend_cpu, op_name)) {
                n_ok++;
            }
        }
        printf("  %zu/%zu tests passed\n", n_ok, test_cases.size());

        ggml_backend_free(backend_cpu);

        return n_ok == test_cases.size();
    }

    if (mode == MODE_PERF) {
        for (auto & test : test_cases) {
            test->eval_perf(backend, op_name);
        }
        return true;
    }

    GGML_ABORT("fatal error");
}
static void usage(char ** argv) {
    printf("Usage: %s [mode] [-o op] [-b backend]\n", argv[0]);
    printf("    valid modes are: test (compare with CPU backend for correctness) or perf (performance evaluation)\n");
    printf("    op names are as given by ggml_op_desc()\n");
}
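// example invocations (backend names depend on the build, e.g. "CUDA0" or "Vulkan0"):
//   test-backend-ops test -o MUL_MAT
//   test-backend-ops perf -b CUDA0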
int main(int argc, char ** argv) {
    test_mode mode = MODE_TEST;
    const char * op_name_filter = NULL;
    const char * backend_filter = NULL;

    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "test") == 0) {
            mode = MODE_TEST;
        } else if (strcmp(argv[i], "perf") == 0) {
            mode = MODE_PERF;
        } else if (strcmp(argv[i], "-o") == 0) {
            if (i + 1 < argc) {
                op_name_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else if (strcmp(argv[i], "-b") == 0) {
            if (i + 1 < argc) {
                backend_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else {
            usage(argv);
            return 1;
        }
    }

    // enumerate backends
    printf("Testing %zu backends\n\n", ggml_backend_reg_get_count());

    size_t n_ok = 0;

    for (size_t i = 0; i < ggml_backend_reg_get_count(); i++) {
        printf("Backend %zu/%zu (%s)\n", i + 1, ggml_backend_reg_get_count(), ggml_backend_reg_get_name(i));

        if (backend_filter != NULL && strcmp(backend_filter, ggml_backend_reg_get_name(i)) != 0) {
            printf("  Skipping\n");
            n_ok++;
            continue;
        }

        ggml_backend_t backend = ggml_backend_reg_init_backend(i, NULL);
        GGML_ASSERT(backend != NULL);

        if (backend_filter == NULL && ggml_backend_is_cpu(backend)) {
            printf("  Skipping CPU backend\n");
            ggml_backend_free(backend);
            n_ok++;
            continue;
        }

        printf("  Backend name: %s\n", ggml_backend_name(backend));

        bool ok = test_backend(backend, mode, op_name_filter);

        printf("  Backend %s: ", ggml_backend_name(backend));
        if (ok) {
            printf("\033[1;32mOK\033[0m\n");
            n_ok++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }

        printf("\n");

        ggml_backend_free(backend);
    }

    printf("%zu/%zu backends passed\n", n_ok, ggml_backend_reg_get_count());

    if (n_ok != ggml_backend_reg_get_count()) {
        printf("\033[1;31mFAIL\033[0m\n");
        return 1;
    }

    ggml_quantize_free();

    printf("\033[1;32mOK\033[0m\n");
    return 0;
}