// test-backend-ops.cpp

#include <ggml.h>
#include <ggml-alloc.h>
#include <ggml-backend.h>
#include <algorithm>
#include <array>
#include <cfloat>
#include <cstring>
#include <functional>
#include <memory>
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thread>
#include <vector>

static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
    // static RNG initialization (revisit if n_threads stops being constant)
    static const size_t n_threads = std::thread::hardware_concurrency();
    static std::vector<std::default_random_engine> generators = []() {
        std::random_device rd;
        std::vector<std::default_random_engine> vec;
        vec.reserve(n_threads);
        //for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
        for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
        return vec;
    }();

    size_t size = ggml_nelements(tensor);
    std::vector<float> data(size);

    auto init_thread = [&](size_t ith, size_t start, size_t end) {
        std::uniform_real_distribution<float> distribution(min, max);
        for (size_t i = start; i < end; i++) {
            data[i] = distribution(generators[ith]);
        }
    };

    std::vector<std::thread> threads;
    threads.reserve(n_threads);
    for (size_t i = 0; i < n_threads; i++) {
        size_t start = i*size/n_threads;
        size_t end   = (i+1)*size/n_threads;
        threads.emplace_back(init_thread, i, start, end);
    }
    for (auto & t : threads) {
        t.join();
    }

#if 0
    const char * val_str = getenv("GGML_TEST_EPS");
    float val = 1e-9f;
    if (val_str != nullptr) {
        val = std::stof(val_str);
        printf("GGML_TEST_EPS=%e\n", val);
    }
    // test quantization with very small values that may result in nan scales due to division by zero
    if (ggml_is_quantized(tensor->type)) {
        for (int i = 0; i < 256; i++) {
            data[i] = val;
        }
    }
#endif

    if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
        ggml_backend_tensor_set(tensor, data.data(), 0, size * sizeof(float));
    } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
        GGML_ASSERT(size % ggml_blck_size(tensor->type) == 0);
        std::vector<uint8_t> dataq(ggml_row_size(tensor->type, size));
        std::vector<float> imatrix(tensor->ne[0], 1.0f); // dummy importance matrix
        const float * im = imatrix.data();
        if (!ggml_quantize_requires_imatrix(tensor->type)) {
            // when the imatrix is optional, we want to test both quantization with and without imatrix
            // use one of the random numbers to decide
            if (data[0] > 0.5f*(min + max)) {
                im = nullptr;
            }
        }
        ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size/tensor->ne[0], tensor->ne[0], im);
        GGML_ASSERT(ggml_validate_row_data(tensor->type, dataq.data(), dataq.size()));
        ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
    } else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
        // This is going to create some weird integers though.
        // (note: I32 never actually reaches this branch, it is already handled by the first case above)
        ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
    } else {
        GGML_ASSERT(false);
    }
}
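// Usage sketch (illustrative only, not called anywhere at this point in the file): after a
// tensor's backend buffer has been allocated, e.g. as test_case::eval() does below, a test
// fills it with uniform random values:
//
//   ggml_tensor * t = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
//   ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//   init_tensor_uniform(t);               // default range [-1.0f, 1.0f]
//   init_tensor_uniform(t, 0.0f, 100.0f); // custom range, e.g. for sqrt inputs
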
static std::vector<float> tensor_to_float(const ggml_tensor * t) {
    std::vector<float> tv;
    tv.reserve(ggml_nelements(t));

    std::vector<uint8_t> buf(ggml_nbytes(t));
    ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));

    ggml_type_traits_t tt = ggml_internal_get_type_traits(t->type);
    size_t bs = ggml_blck_size(t->type);
    std::vector<float> vq(ggml_blck_size(t->type));
    bool quantized = ggml_is_quantized(t->type);

    // access elements by index to avoid gaps in views
    for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
        for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
            for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
                for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
                    size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
                    if (t->type == GGML_TYPE_F16) {
                        tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_BF16) {
                        tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_F32) {
                        tv.push_back(*(float *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I32) {
                        tv.push_back((float)*(int32_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I16) {
                        tv.push_back((float)*(int16_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I8) {
                        tv.push_back((float)*(int8_t *) &buf[i]);
                    } else if (quantized) {
                        tt.to_float(&buf[i], vq.data(), bs);
                        tv.insert(tv.end(), vq.begin(), vq.end());
                    } else {
                        GGML_ASSERT(false);
                    }
                }
            }
        }
    }

    return tv;
}
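// The explicit nb[]-based indexing above matters for views: a non-contiguous view of a
// larger tensor has "gaps" between rows, so walking the buffer linearly would read
// padding bytes. For example (hypothetical shapes), a view of the first 128 columns of
// a 384-column F32 tensor has nb[1] = 384*4 bytes, and element (i0, i1) lives at byte
// offset i1*nb[1] + i0*nb[0], not at (i1*128 + i0)*4.
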
/*
static double cosine_similarity(const float * v1, const float * v2, size_t n) {
    double dot  = 0.0;
    double mag1 = 0.0;
    double mag2 = 0.0;
    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return -1.0f;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        dot  += v1[i]*v2[i];
        mag1 += v1[i]*v1[i];
        mag2 += v2[i]*v2[i];
    }
    return dot/sqrt(mag1*mag2);
}

static float distance(const float * v1, const float * v2, size_t n) {
    double d = 0.0;
    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return INFINITY;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        d += (v1[i] - v2[i])*(v1[i] - v2[i]);
    }
    return sqrt(d);
}

static float vec_len(const float * v, size_t n) {
    double d = 0.0;
    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v[i])) {
            return INFINITY;
        }
        if (std::isinf(v[i])) {
            continue;
        }
        d += v[i]*v[i];
    }
    return sqrt(d);
}
*/
// normalized mean squared error = mse(a, b) / mse(a, 0)
static double nmse(const float * a, const float * b, size_t n) {
    double mse_a_b = 0.0;
    double mse_a_0 = 0.0;
    for (size_t i = 0; i < n; i++) {
        float a_i = a[i];
        float b_i = b[i];
        mse_a_b += (a_i - b_i) * (a_i - b_i);
        mse_a_0 += a_i * a_i;
    }
    return mse_a_b / mse_a_0;
}
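// Worked example: a = {1, 2}, b = {1, 1}
//   mse(a, b) = (1-1)^2 + (2-1)^2 = 1
//   mse(a, 0) = 1^2 + 2^2         = 5
//   nmse      = 1/5 = 0.2
// (The 1/n factor of each mean cancels in the ratio, so the plain sums are used.)
// Normalizing by mse(a, 0) makes the error threshold independent of the magnitude of
// the reference values, so the same max_nmse_err can be used across ops and shapes.
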
// utils for printing the variables of the test cases
#define VAR_TO_STR(x) (#x "=" + var_to_str(x))

template<typename T>
static std::string var_to_str(const T & x) {
    return std::to_string(x);
}

template<typename T, size_t N>
static std::string var_to_str(const T (&x)[N]) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

template<typename T, size_t N>
static std::string var_to_str(const std::array<T, N> & x) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

//static std::string var_to_str(ggml_unary_op unary_op) {
//    return ggml_unary_op_name(unary_op);
//}

static std::string var_to_str(ggml_type type) {
    return ggml_type_name(type);
}

static std::string var_to_str(ggml_op_pool pool) {
    switch (pool) {
        case GGML_OP_POOL_AVG: return "avg";
        case GGML_OP_POOL_MAX: return "max";
        default:               return std::to_string(pool);
    }
}

#define VARS_TO_STR1(a) VAR_TO_STR(a)
#define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
#define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
#define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
#define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
#define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
#define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
#define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
#define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k) VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
#define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
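// Example: in a test case declaring members `type`, `ne` and `v`,
//   VARS_TO_STR3(type, ne, v)
// expands to VAR_TO_STR(type) + "," + VAR_TO_STR(ne) + "," + VAR_TO_STR(v) and yields a
// string such as "type=f32,ne=[128,10,10,10],v=0" (the field names come from the #x
// stringification in VAR_TO_STR).
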
#ifdef GGML_USE_SYCL
static bool inline _isinf(float f) {
    return (*(uint32_t *)&f & 0x7fffffff) == 0x7f800000;
}
#else
static bool inline _isinf(float f) { return std::isinf(f); }
#endif

// accept FLT_MAX as infinity
static bool isinf_or_max(float f) {
    return _isinf(f) || f == FLT_MAX || f == -FLT_MAX;
}
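// Treating FLT_MAX as infinity matters because some backends saturate overflowing values
// to +/-FLT_MAX instead of producing an IEEE infinity. The SYCL bit-pattern variant of
// _isinf above presumably exists because std::isinf is not reliable under fast-math style
// compilation there (an assumption inferred from the #ifdef, not verified here).
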
static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

enum test_mode {
    MODE_TEST,
    MODE_PERF,
};

struct test_case {
    virtual ~test_case() {}

    virtual std::string op_desc(ggml_tensor * t) {
        return ggml_op_desc(t);
    }

    virtual std::string vars() {
        return "";
    }

    virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;

    virtual double max_nmse_err() {
        return 1e-7;
    }

    virtual void initialize_tensors(ggml_context * ctx) {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t);
        }
    }

    virtual size_t op_size(ggml_tensor * t) {
        size_t size = ggml_nbytes(t);
        // add source tensors
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            if (t->src[i] != NULL) {
                size += ggml_nbytes(t->src[i]);
            }
        }
        return size;
    }

    ggml_cgraph * gf = nullptr;

    static const int sentinel_size = 1024;

    test_mode mode;

    std::vector<ggml_tensor *> sentinels;

    void add_sentinel(ggml_context * ctx) {
        if (mode == MODE_PERF) {
            return;
        }
        ggml_tensor * sentinel = ::ggml_new_tensor_1d(ctx, GGML_TYPE_F32, sentinel_size);
        ggml_format_name(sentinel, "sent_%zu", sentinels.size());
        sentinels.push_back(sentinel);
    }

    // hijack ggml_new_tensor to add sentinels after each tensor to check for overflows in the backend
    ggml_tensor * ggml_new_tensor(ggml_context * ctx, ggml_type type, int n_dims, const int64_t * ne) {
        ggml_tensor * t = ::ggml_new_tensor(ctx, type, n_dims, ne);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_1d(ggml_context * ctx, ggml_type type, int64_t ne0) {
        ggml_tensor * t = ::ggml_new_tensor_1d(ctx, type, ne0);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_2d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1) {
        ggml_tensor * t = ::ggml_new_tensor_2d(ctx, type, ne0, ne1);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_3d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) {
        ggml_tensor * t = ::ggml_new_tensor_3d(ctx, type, ne0, ne1, ne2);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
        ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
        add_sentinel(ctx);
        return t;
    }
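    // How the sentinel check works: because these member functions shadow the global
    // ggml_new_tensor* inside build_graph(), every tensor a test allocates is followed
    // by a 1024-float "sent_N" tensor. The sentinels are appended to the graph as plain
    // GGML_OP_NONE nodes, and the compare callback in eval() below verifies that their
    // contents are identical on both backends, i.e. that no kernel wrote past the end
    // of its output buffer into the adjacent allocation.
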
    bool eval(ggml_backend_t backend1, ggml_backend_t backend2, const char * op_name) {
        mode = MODE_TEST;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        gf = ggml_new_graph(ctx);

        // pre-graph sentinel
        add_sentinel(ctx);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if the backends support the ops
        bool supported = true;
        for (ggml_backend_t backend : {backend1, backend2}) {
            for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
                if (!ggml_backend_supports_op(backend, t)) {
                    printf("not supported [%s] ", ggml_backend_name(backend));
                    supported = false;
                    break;
                }
            }
        }
        if (!supported) {
            printf("\n");
            ggml_free(ctx);
            return true;
        }

        // post-graph sentinel
        add_sentinel(ctx);

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
        if (buf == NULL) {
            printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
            ggml_free(ctx);
            return false;
        }

        // build graph
        ggml_build_forward_expand(gf, out);

        // add sentinels as graph nodes so that they are checked in the callback
        for (ggml_tensor * sentinel : sentinels) {
            gf->nodes[gf->n_nodes++] = sentinel;
        }

        // randomize tensors
        initialize_tensors(ctx);

        // compare
        struct callback_userdata {
            bool   ok;
            double max_err;
            ggml_backend_t backend1;
            ggml_backend_t backend2;
        };

        callback_userdata ud {
            true,
            max_nmse_err(),
            backend1,
            backend2
        };

        auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
            callback_userdata * ud = (callback_userdata *) user_data;
            const char * bn1 = ggml_backend_name(ud->backend1);
            const char * bn2 = ggml_backend_name(ud->backend2);

            if (t1->op == GGML_OP_NONE) {
                // sentinels must be unchanged
                std::vector<uint8_t> t1_data(ggml_nbytes(t1));
                std::vector<uint8_t> t2_data(ggml_nbytes(t2));
                ggml_backend_tensor_get(t1, t1_data.data(), 0, ggml_nbytes(t1));
                ggml_backend_tensor_get(t2, t2_data.data(), 0, ggml_nbytes(t2));

                if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
                    printf("sentinel mismatch: %s ", t1->name);
                    ud->ok = false;
                    return true;
                }
            }

            std::vector<float> f1 = tensor_to_float(t1);
            std::vector<float> f2 = tensor_to_float(t2);

            for (size_t i = 0; i < f1.size(); i++) {
                // check for nans
                if (std::isnan(f1[i]) || std::isnan(f2[i])) {
                    printf("[%s] NaN at index %zu (%s=%f %s=%f) ", ggml_op_desc(t1), i, bn1, f1[i], bn2, f2[i]);
                    ud->ok = false;
                    return true;
                }
                // check for infs: both must be inf of the same sign, or both must be finite
                if (isinf_or_max(f1[i]) || isinf_or_max(f2[i])) {
                    if (isinf_or_max(f1[i]) && isinf_or_max(f2[i])) {
                        if (std::signbit(f1[i]) != std::signbit(f2[i])) {
                            printf("[%s] inf sign mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                            ud->ok = false;
                            return true;
                        }
                    } else {
                        printf("[%s] inf mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                        ud->ok = false;
                        return true;
                    }
                }
            }

            double err = nmse(f1.data(), f2.data(), f1.size());
            if (err > ud->max_err) {
                printf("[%s] NMSE = %.9f > %.9f ", ggml_op_desc(t1), err, ud->max_err);
                //for (int i = 0; i < (int) f1.size(); i++) {
                //    printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
                //}
                //printf("\n");
                //exit(1);
                ud->ok = false;
            }
            return true;

            GGML_UNUSED(index);
        };

        const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);
        if (!cmp_ok) {
            printf("compare failed ");
        }

        ggml_backend_buffer_free(buf);
        ggml_free(ctx);

        if (ud.ok && cmp_ok) {
            printf("\033[1;32mOK\033[0m\n");
            return true;
        }

        printf("\033[1;31mFAIL\033[0m\n");
        return false;
    }
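    // Note on the return value of eval(): true covers both "passed" and "skipped /
    // not supported"; false is reserved for real failures (allocation failure,
    // sentinel mismatch, NaN/inf disagreement, or NMSE above the threshold), so a
    // runner can simply AND the results of all test cases together.
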
    bool eval_perf(ggml_backend_t backend, const char * op_name) {
        mode = MODE_PERF;

        static const size_t graph_nodes = 8192;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        int len = printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if backends support op
        if (!ggml_backend_supports_op(backend, out)) {
            printf("not supported\n");
            ggml_free(ctx);
            return true;
        }

        // align while also leaving some margin for variations in parameters
        int align = 20;
        int last = (len + align - 1) / align * align;
        if (last - len < 5) {
            last += align;
        }
        last = std::max(last, 60);
        printf("%*s", last - len, "");

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
        if (buf == NULL) {
            printf("failed to allocate tensors\n");
            ggml_free(ctx);
            return false;
        }

        // randomize tensors
        initialize_tensors(ctx);

        // build graph
        ggml_cgraph * gf = ggml_new_graph_custom(ctx, graph_nodes, false);
        ggml_build_forward_expand(gf, out);

        // warmup run
        ggml_backend_graph_compute(backend, gf);

        // duplicate the op
        size_t target_size = ggml_backend_is_cpu(backend) ? 1ULL << 33 : 1ULL << 35; // 8 GB CPU, 32 GB GPU
        int n_runs = std::min((size_t)gf->size - gf->n_nodes, target_size / op_size(out)) + 1;
        for (int i = 1; i < n_runs; i++) {
            gf->nodes[gf->n_nodes++] = out;
        }

        // calculate memory
        size_t mem = n_runs * op_size(out);
        auto tensor_op_size = [](ggml_tensor * t) {
            size_t size = ggml_nbytes(t);
            // add source tensors
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                if (t->src[i] != NULL) {
                    size += ggml_nbytes(t->src[i]);
                }
            }
            return size;
        };
        for (int i = 0; i < gf->n_nodes; i++) {
            if (ggml_is_view_op(gf->nodes[i]->op) || gf->nodes[i] == out) {
                continue;
            }
            mem += tensor_op_size(gf->nodes[i]);
        }

        // run
        ggml_backend_synchronize(backend);

        int64_t start_time = ggml_time_us();
        ggml_backend_graph_compute(backend, gf);
        ggml_backend_synchronize(backend);

        int64_t end_time = ggml_time_us();
        double time_us = end_time - start_time;

        printf(" %5d runs - %8.2f us/run - %8zu kB/run - \033[1;34m%7.2f GB/s\033[0m\n",
            n_runs,
            time_us / n_runs,
            op_size(out) / 1024,
            mem / (time_us/1e6) / 1024.0 / 1024.0 / 1024.0);

        ggml_backend_buffer_free(buf);
        ggml_free(ctx);

        return true;
    }
};
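// Minimal usage sketch (illustrative, not part of the harness): a test case subclasses
// test_case, declares its parameters, and implements build_graph(); the runner then
// calls eval() / eval_perf() on each instance. `test_neg` below is a hypothetical
// example, not a case defined in this file:
//
//   struct test_neg : public test_case {
//       ggml_tensor * build_graph(ggml_context * ctx) override {
//           ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 64);
//           return ggml_neg(ctx, a);
//       }
//   };
//
//   // test_neg().eval(backend_cpu, backend_gpu, nullptr);
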
// GGML_OP_UNARY
struct test_unary : public test_case {
    const ggml_unary_op op;
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int v; // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, v);
    }

    test_unary(ggml_unary_op op,
            ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {128, 10, 10, 10},
            int v = 0)
        : op(op), type(type), ne_a(ne_a), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }

        ggml_tensor * out = ggml_unary(ctx, a, op);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            // test extended range of values to check for NaNs in GELU
            init_tensor_uniform(t, -150.f, 150.f);
        }
    }
};
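// The `v & 1` branch above (the same pattern appears in test_rope and test_concat)
// allocates a parent tensor wider than needed and takes a ggml_view_4d of the requested
// shape: same logical extents, but the nb[] strides of the larger parent. This produces
// a non-contiguous input that exercises the strided code paths of backend kernels.
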
// GGML_OP_GET_ROWS
struct test_get_rows : public test_case {
    const ggml_type type;
    const int n; // cols
    const int m; // rows
    const int r; // rows to get
    const int b; // batch size
    const bool v; // view (non-contiguous src1)

    std::string vars() override {
        return VARS_TO_STR6(type, n, m, r, b, v);
    }

    test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
        : type(type), n(n), m(m), r(r), b(b), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * in = ggml_new_tensor_3d(ctx, type, n, m, b);
        ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
        if (v) {
            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
        }
        ggml_tensor * out = ggml_get_rows(ctx, in, rows);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // rows
                std::vector<int> data(r*b);
                for (int i = 0; i < r*b; i++) {
                    data[i] = rand() % m;
                }
                ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
// GGML_OP_REPEAT
struct test_repeat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 2;
    }

    test_repeat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            std::array<int, 4> nr = {2, 2, 2, 2})
        : type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * target = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_repeat(ctx, src, target);
        return out;
    }
};

// GGML_OP_DUP
struct test_dup : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> permute;
    bool _use_permute;

    std::string vars() override {
        std::string v = VARS_TO_STR2(type, ne);
        if (_use_permute) v += "," + VAR_TO_STR(permute);
        return v;
    }

    test_dup(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1},
            std::array<int64_t, 4> permute = {0, 0, 0, 0})
        : type(type), ne(ne), permute(permute),
            _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        if (_use_permute) {
            src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
        }
        ggml_tensor * out = ggml_dup(ctx, src);
        return out;
    }
};
// GGML_OP_CPY
struct test_cpy : public test_case {
    const ggml_type type_src;
    const ggml_type type_dst;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR3(type_src, type_dst, ne);
    }

    double max_nmse_err() override {
        return 1e-6;
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
    }

    test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type_src(type_src), type_dst(type_dst), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
        ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, ne.data());
        ggml_tensor * out = ggml_cpy(ctx, src, dst);
        return out;
    }
};

// GGML_OP_CONT
struct test_cont : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cont(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        src = ggml_transpose(ctx, src);
        ggml_tensor * out = ggml_cont(ctx, src);
        return out;
    }
};
// GGML_OP_ADD
// GGML_OP_MUL
// GGML_OP_DIV
struct test_bin_bcast : public test_case {
    using op_t = ggml_tensor * (*) (ggml_context *, ggml_tensor *, ggml_tensor *);
    op_t op;
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 3;
    }

    test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 1, 1},
            std::array<int, 4> nr = {1, 2, 1, 1})
        : op(op), type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = op(ctx, a, b);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (op == ggml_div) {
                // avoid division by zero
                init_tensor_uniform(t, 1.0f, 2.0f);
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
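// Note the broadcast direction in test_bin_bcast: the second operand `b` keeps the base
// shape `ne` while the first operand `a` is enlarged by the per-dimension repeat factors
// `nr`, matching ggml's rule that src1 is broadcast across src0. E.g. with the defaults
// ne = {10, 10, 1, 1} and nr = {1, 2, 1, 1}, a is 10x20 and each row of b is reused for
// two rows of a.
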
// GGML_OP_SCALE
struct test_scale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float scale;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, scale);
    }

    test_scale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float scale = 2.0f)
        : type(type), ne(ne), scale(scale) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_scale(ctx, a, scale);
        return out;
    }
};

// GGML_OP_NORM
struct test_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_norm(ctx, a, eps);
        return out;
    }
};

// GGML_OP_RMS_NORM
struct test_rms_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_rms_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_rms_norm(ctx, a, eps);
        return out;
    }
};
// GGML_OP_MUL_MAT
struct test_mul_mat : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int64_t m;
    const int64_t n;
    const int64_t k;
    const std::array<int64_t, 2> bs; // dims 3 and 4
    const std::array<int64_t, 2> nr; // repeat in dims 3 and 4

    std::string vars() override {
        return VARS_TO_STR7(type_a, type_b, m, n, k, bs, nr);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[0]) * n * nr[0] * nr[1];
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int64_t m = 32, int64_t n = 32, int64_t k = 32,
            std::array<int64_t, 2> bs = {10, 10},
            std::array<int64_t, 2> nr = {2, 2})
        : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0],       bs[1]);
        ggml_tensor * b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]);
        ggml_tensor * out = ggml_mul_mat(ctx, a, b);
        return out;
    }
};
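// ggml_mul_mat shape convention: for A with ne = [k, m, ...] and B with ne = [k, n, ...]
// the result has ne = [m, n, ...], i.e. both inputs store their rows along dim 0 and the
// op computes C^T = A * B^T as noted above. Worked example with the defaults
// (m = n = k = 32, bs = {10, 10}, nr = {2, 2}): a is [32, 32, 10, 10],
// b is [32, 32, 20, 20], and each batch of a is reused nr[0]*nr[1] = 4 times.
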
// GGML_OP_MUL_MAT_ID
struct test_mul_mat_id : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int n_mats;
    const int n_used;
    const bool b; // broadcast b matrix
    const int64_t m;
    const int64_t n;
    const int64_t k;

    std::string vars() override {
        return VARS_TO_STR8(type_a, type_b, n_mats, n_used, b, m, n, k);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[2]) * n;
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int n_mats = 8, int n_used = 2, bool b = false,
            int64_t m = 32, int64_t n = 32, int64_t k = 32)
        : type_a(type_a), type_b(type_b), n_mats(n_mats), n_used(n_used), b(b),
            m(m), n(n), k(k) {
        GGML_ASSERT(n_used <= n_mats);
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats);
        ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n);
        if (n_used != n_mats) {
            ids = ggml_view_2d(ctx, ids, n_used, n, ids->nb[1], 0);
        }
        ggml_tensor * b = ggml_new_tensor_3d(ctx, type_b, k, this->b ? 1 : n_used, n);
        ggml_tensor * out = ggml_mul_mat_id(ctx, as, b, ids);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // ids
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<int32_t> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i % n_mats;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(int32_t));
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
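// test_mul_mat_id models the mixture-of-experts pattern: `as` stacks n_mats expert
// matrices, `ids` selects n_used of them per row of `b` (or a single shared row when
// broadcasting), and ggml_mul_mat_id gathers only the selected experts. Each ids row is
// a shuffled 0..n_mats-1 sequence, so every expert gets exercised and no index can be
// out of range.
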
// GGML_OP_SQR
struct test_sqr : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqr(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sqr(ctx, a);
        return out;
    }
};

// GGML_OP_SQRT
struct test_sqrt : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqrt(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sqrt(ctx, a);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        // fill with positive values
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, 0.0f, 100.0f);
        }
    }
};

// GGML_OP_CLAMP
struct test_clamp : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float min;
    float max;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, min, max);
    }

    test_clamp(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float min = -0.5f, float max = 0.5f)
        : type(type), ne(ne), min(min), max(max) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_clamp(ctx, a, min, max);
        return out;
    }
};
// GGML_OP_DIAG_MASK_INF
struct test_diag_mask_inf : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int n_past;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, n_past);
    }

    test_diag_mask_inf(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            int n_past = 5)
        : type(type), ne(ne), n_past(n_past) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_diag_mask_inf(ctx, a, n_past);
        return out;
    }
};

// GGML_OP_SOFT_MAX
struct test_soft_max : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const bool mask;
    const float scale;
    const float max_bias;

    std::string vars() override {
        return VARS_TO_STR5(type, ne, mask, scale, max_bias);
    }

    // the 1024 test with bias occasionally fails:
    // SOFT_MAX(type=f32,ne=[1024,16,1,1],mask=1,scale=1.000000,max_bias=8.000000): [SOFT_MAX] NMSE = 0.000000103 > 0.000000100 FAIL
    virtual double max_nmse_err() override {
        return 1e-6;
    }

    test_soft_max(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            bool mask = false,
            float scale = 1.0f,
            float max_bias = 0.0f)
        : type(type), ne(ne), mask(mask), scale(scale), max_bias(max_bias) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * mask = nullptr;
        if (this->mask) {
            mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]);
        }
        ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
        return out;
    }
};
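// ggml_soft_max_ext computes, per row, roughly softmax(a*scale + slope*mask), where the
// ALiBi slope is derived from max_bias (slope = 1 when max_bias = 0). The relaxed 1e-6
// threshold above exists because the wide masked/biased case sits right at the default
// 1e-7 limit, as the comment on max_nmse_err() documents.
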
// GGML_OP_ROPE
struct test_rope : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int n_dims;
    int mode;
    int n_ctx; // used to generate positions
    float fs; // freq_scale
    float ef; // ext_factor
    float af; // attn_factor
    bool ff; // freq_factors
    int v; // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR10(type, ne_a, n_dims, mode, n_ctx, fs, ef, af, ff, v);
    }

    test_rope(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 1},
            int n_dims = 10, int mode = 0, int n_ctx = 512, float fs = 1.0f, float ef = 0.0f, float af = 0.0f, bool ff = false, int v = 0)
        : type(type), ne_a(ne_a), n_dims(n_dims), mode(mode), n_ctx(n_ctx), fs(fs), ef(ef), af(af), ff(ff), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }

        ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2]);
        ggml_tensor * freq = ff ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_dims/2) : nullptr;
        ggml_tensor * out = ggml_rope_ext(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(ne_a[2]);
                for (int i = 0; i < ne_a[2]; i++) {
                    data[i] = rand() % n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, ne_a[2] * sizeof(int));
            } else {
                if (t->ne[0] == n_dims/2) {
                    // frequency factors in the range [0.9f, 1.1f]
                    init_tensor_uniform(t, 0.9f, 1.1f);
                } else {
                    init_tensor_uniform(t);
                }
            }
        }
    }
};
// GGML_OP_POOL2D
struct test_pool2d : public test_case {
    enum ggml_op_pool pool_type;
    const ggml_type type_input;
    const std::array<int64_t, 4> ne_input;
    // kernel size
    const int k0;
    const int k1;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;

    std::string vars() override {
        return VARS_TO_STR9(pool_type, type_input, ne_input, k0, k1, s0, s1, p0, p1);
    }

    test_pool2d(ggml_op_pool pool_type = GGML_OP_POOL_AVG,
            ggml_type type_input = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
            int k0 = 3, int k1 = 3,
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1)
        : pool_type(pool_type), type_input(type_input), ne_input(ne_input), k0(k0), k1(k1), s0(s0), s1(s1), p0(p0), p1(p1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_tensor * out = ggml_pool_2d(ctx, input, pool_type, k0, k1, s0, s1, p0, p1);
        return out;
    }
};
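// Expected output extent of pool_2d per spatial dimension (standard pooling arithmetic):
//   out = (in + 2*p - k) / s + 1
// e.g. with the defaults in = 10, k = 3, s = 1, p = 1: out = (10 + 2 - 3)/1 + 1 = 10.
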
// GGML_OP_IM2COL
struct test_im2col : public test_case {
    const ggml_type type_input;
    const ggml_type type_kernel;
    const ggml_type dst_type;
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;
    // dilation
    const int d0;
    const int d1;
    // mode
    const bool is_2D;

    std::string vars() override {
        return VARS_TO_STR12(type_input, type_kernel, dst_type, ne_input, ne_kernel, s0, s1, p0, p1, d0, d1, is_2D);
    }

    test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16, ggml_type dst_type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
            std::array<int64_t, 4> ne_kernel = {3, 3, 3, 1}, // [kernel_width, kernel_height, input_channels, 1]
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1,
            int d0 = 1, int d1 = 1,
            bool is_2D = true)
        : type_input(type_input), type_kernel(type_kernel), dst_type(dst_type), ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), is_2D(is_2D) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data());
        ggml_tensor * out = ggml_im2col(ctx, kernel, input, s0, s1, p0, p1, d0, d1, is_2D, dst_type);
        return out;
    }
};
// GGML_OP_CONCAT
struct test_concat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int64_t ne_b_d;
    const int dim;
    const int v; // view (1 << 0: non-cont a, 1 << 1: non-cont b)

    std::string vars() override {
        return VARS_TO_STR5(type, ne_a, ne_b_d, dim, v);
    }

    test_concat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 10},
            int64_t ne_b_d = 10,
            int dim = 2, int v = 0)
        : type(type), ne_a(ne_a), ne_b_d(ne_b_d), dim(dim), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        auto ne_b = ne_a;
        ne_b[dim] = ne_b_d;
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        }
        ggml_tensor * b;
        if (v & 2) {
            auto ne = ne_b; ne[0] *= 3; ne[1] *= 2; ne[2] *= 4;
            b = ggml_new_tensor(ctx, type, 4, ne.data());
            b = ggml_view_4d(ctx, b, ne_b[0], ne_b[1], ne_b[2], ne_b[3], b->nb[1], b->nb[2], b->nb[3], 0);
        } else {
            b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        }
        ggml_tensor * out = ggml_concat(ctx, a, b, dim);
        return out;
    }
};
// GGML_OP_ARGSORT
struct test_argsort : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    ggml_sort_order order;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, order);
    }

    test_argsort(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {16, 10, 10, 10},
            ggml_sort_order order = GGML_SORT_ORDER_ASC)
        : type(type), ne(ne), order(order) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_argsort(ctx, a, order);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // indices
                std::vector<int> data(ggml_nelements(t));
                for (int i = 0; i < ggml_nelements(t); i++) {
                    data[i] = rand();
                }
                std::shuffle(data.begin(), data.end(), rng);
                ggml_backend_tensor_set(t, data.data(), 0, ne[0]*ne[1]*ne[2]*ne[3] * sizeof(int));
            } else if (t->type == GGML_TYPE_F32) {
                // initialize with unique values to avoid ties
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<float> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
                }
            } else {
                GGML_ASSERT(false);
            }
        }
    }
};
// GGML_OP_SUM_ROWS
struct test_sum_rows : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sum_rows(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sum_rows(ctx, a);
        return out;
    }
};

// GGML_OP_UPSCALE
struct test_upscale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t scale_factor;
    const bool transpose;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, scale_factor, transpose);
    }

    test_upscale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {512, 512, 3, 1},
            int32_t scale_factor = 2, bool transpose = false)
        : type(type), ne(ne), scale_factor(scale_factor), transpose(transpose) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        if (transpose) a = ggml_transpose(ctx, a);
        ggml_tensor * out = ggml_upscale(ctx, a, scale_factor);
        return out;
    }
};

// GGML_OP_UPSCALE (ext)
struct test_upscale_ext : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> ne_tgt;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, ne_tgt);
    }

    test_upscale_ext(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {2, 5, 7, 11},
            std::array<int64_t, 4> ne_tgt = {5, 7, 11, 13})
        : type(type), ne(ne), ne_tgt(ne_tgt) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1], ne_tgt[2], ne_tgt[3]);
        return out;
    }
};
// GGML_OP_GROUP_NORM
struct test_group_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t num_groups;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, num_groups);
    }

    test_group_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 64, 320, 1},
            int32_t num_groups = 32)
        : type(type), ne(ne), num_groups(num_groups) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_group_norm(ctx, a, num_groups);
        return out;
    }
};
// GGML_OP_ACC
struct test_acc : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const std::array<int64_t, 4> ne_b;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, ne_b);
    }

    test_acc(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {1024, 577, 1, 1},
            std::array<int64_t, 4> ne_b = {1024, 576, 1, 1})
        : type(type), ne_a(ne_a), ne_b(ne_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
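        // accumulate b into a view of a that uses a's own strides, offset by
        // one row (b->nb[1] bytes); ne_a has one more row than ne_b, so the
        // write stays in bounds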
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        ggml_tensor * out = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], b->nb[1]);
        return out;
    }
};
// GGML_OP_PAD
struct test_pad : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int pad_0;
    const int pad_1;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
    }

    test_pad(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {512, 512, 1, 1},
            int pad_0 = 1, int pad_1 = 1)
        : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_pad(ctx, a, pad_0, pad_1, 0, 0);
        return out;
    }
};
// GGML_OP_ARANGE
struct test_arange : public test_case {
    const ggml_type type;
    const float start;
    const float stop;
    const float step;

    std::string vars() override {
        return VARS_TO_STR4(type, start, stop, step);
    }

    test_arange(ggml_type type = GGML_TYPE_F32,
            float start = 0.f, float stop = 10.f, float step = 1.f)
        : type(type), start(start), stop(stop), step(step) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * out = ggml_arange(ctx, start, stop, step);
        return out;
    }
};
// GGML_OP_TIMESTEP_EMBEDDING
struct test_timestep_embedding : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int dim;
    const int max_period;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, dim, max_period);
    }

    test_timestep_embedding(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {2, 1, 1, 1},
            int dim = 320, int max_period = 10000)
        : type(type), ne_a(ne_a), dim(dim), max_period(max_period) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_timestep_embedding(ctx, a, dim, max_period);
        return out;
    }
};
// GGML_OP_LEAKY_RELU
struct test_leaky_relu : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const float negative_slope;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, negative_slope);
    }

    test_leaky_relu(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 10},
            float negative_slope = 0.1f)
        : type(type), ne_a(ne_a), negative_slope(negative_slope) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_leaky_relu(ctx, a, negative_slope, true);
        return out;
    }
};
// GGML_OP_FLASH_ATTN_EXT
struct test_flash_attn_ext : public test_case {
    const int64_t hs; // head size
    const int64_t nh; // num heads
    const int64_t kv; // kv size
    const int64_t nb; // batch size

    const bool mask;      // use mask
    const float max_bias; // ALiBi

    const ggml_type type_KV;

    std::string vars() override {
        return VARS_TO_STR7(hs, nh, kv, nb, mask, max_bias, type_KV);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8,
            bool mask = true, float max_bias = 0.0f, ggml_type type_KV = GGML_TYPE_F16)
        : hs(hs), nh(nh), kv(kv), nb(nb), mask(mask), max_bias(max_bias), type_KV(type_KV) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
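        // the head size is padded to the block size of the KV cache type so
        // that quantized K/V rows always consist of whole blocks; the mask is
        // padded along the batch dimension to GGML_KQ_MASK_PAD, as
        // ggml_flash_attn_ext expects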
        const int64_t hs_padded = GGML_PAD(hs, ggml_blck_size(type_KV));

        ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs_padded, nb, nh, 1);
        ggml_tensor * k = ggml_new_tensor_4d(ctx, type_KV,       hs_padded, kv, nh, 1);
        ggml_tensor * v = ggml_new_tensor_4d(ctx, type_KV,       hs_padded, kv, nh, 1);
        ggml_tensor * m = mask ? ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1) : nullptr;
        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias);
        return out;
    }
};
enum llm_norm_type {
    LLM_NORM,
    LLM_NORM_RMS,
};

struct llama_hparams {
    uint32_t n_vocab;
    uint32_t n_embd;
    uint32_t n_head;
    uint32_t n_head_kv;
    static constexpr uint32_t n_layer = 1;
    uint32_t n_rot;
    uint32_t n_embd_head; // dimension of values (d_v)
    uint32_t n_ff;
    float f_norm_eps;
    float f_norm_rms_eps;

    // cparams
    static constexpr uint32_t n_ctx = 512; // user-specified context size
    static constexpr uint32_t n_ctx_orig = n_ctx;

    // batch
    int32_t n_tokens;

    // llm_build_context
    static constexpr int32_t n_kv = 32;   // size of KV cache to consider (n_kv <= n_ctx)
    static constexpr int32_t kv_head = 1; // index of where we store new KV data in the cache

    uint32_t n_embd_gqa() const { // dimension of key embeddings across all k-v heads
        return n_embd_head * n_head_kv;
    }
};
// LLM base class
struct test_llm : public test_case {
    llama_hparams hp;

protected:
    test_llm(llama_hparams hp)
        : hp(std::move(hp)) {
    }

public:
    struct ggml_tensor * llm_build_norm(
            struct ggml_context * ctx,
            struct ggml_tensor * cur,
            struct ggml_tensor * mw,
            struct ggml_tensor * mb,
            llm_norm_type type) {
        switch (type) {
            case LLM_NORM:     cur = ggml_norm    (ctx, cur, hp.f_norm_eps);     break;
            case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hp.f_norm_rms_eps); break;
        }
        cur = ggml_mul(ctx, cur, mw);
        if (mb) {
            cur = ggml_add(ctx, cur, mb);
        }
        return cur;
    }
    void llm_build_kv_store(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * k_cur,
            struct ggml_tensor * v_cur) {
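        // K is cached row-major (one row of n_embd_gqa() elements per token)
        // while V is cached transposed, so attention can later read
        // contiguous rows of V^T per head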
        // compute the transposed [n_tokens, n_embd] V matrix
        struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, hp.n_embd_gqa(), hp.n_tokens));

        struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, hp.n_tokens*hp.n_embd_gqa(),
                (ggml_row_size(k_l->type, hp.n_embd_gqa()))*hp.kv_head);

        struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, v_l, hp.n_tokens, hp.n_embd_gqa(),
                (  hp.n_ctx)*ggml_element_size(v_l),
                (hp.kv_head)*ggml_element_size(v_l));

        // important: storing RoPE-ed version of K in the KV cache!
        ggml_cpy(ctx, k_cur, k_cache_view);
        ggml_cpy(ctx, v_cur_t, v_cache_view);
    }
    struct ggml_tensor * llm_build_kqv(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * q_cur,
            struct ggml_tensor * kq_mask,
            float kq_scale) {
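        // standard scaled dot-product attention over the cached K/V: the mask
        // and the 1/sqrt(d) scale are fused into ggml_soft_max_ext, then the
        // result is multiplied by the transposed V cache and the heads are
        // merged back into a single [n_embd, n_tokens] matrix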
        struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);

        struct ggml_tensor * k =
            ggml_view_3d(ctx, k_l,
                    hp.n_embd_head, hp.n_kv, hp.n_head_kv,
                    ggml_row_size(k_l->type, hp.n_embd_gqa()),
                    ggml_row_size(k_l->type, hp.n_embd_head),
                    0);

        struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, 0.0f);

        // split cached v into n_head heads
        struct ggml_tensor * v =
            ggml_view_3d(ctx, v_l,
                    hp.n_kv, hp.n_embd_head, hp.n_head_kv,
                    ggml_element_size(v_l)*hp.n_ctx,
                    ggml_element_size(v_l)*hp.n_ctx*hp.n_embd_head,
                    0);

        struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
        struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
        struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, hp.n_embd_head*hp.n_head, hp.n_tokens);

        struct ggml_tensor * wo = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
        cur = ggml_mul_mat(ctx, wo, cur);

        return cur;
    }
    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(hp.n_tokens);
                for (int i = 0; i < hp.n_tokens; i++) {
                    data[i] = rand() % hp.n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, hp.n_tokens * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
// Llama
struct test_llama : public test_llm {
    static constexpr float freq_base   = 10000.0f;
    static constexpr float freq_scale  = 1.0f;
    static constexpr float ext_factor  = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast   = 32.0f;
    static constexpr float beta_slow   = 1.0f;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "LLAMA";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    test_llama(int n_tokens = 1)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 32,
            /*n_head_kv      =*/ 32,
            /*n_rot          =*/ 100,
            /*n_embd_head    =*/ 100,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 0.f,
            /*f_norm_rms_eps =*/ 1e-5f,
            /*n_tokens       =*/ n_tokens,
        }) {
    }
    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
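        // 1638400 = n_ctx (512) * n_embd_gqa() (3200): one layer's worth of KV cache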
        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            ggml_tensor * attn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, inpL, attn_norm, nullptr, LLM_NORM_RMS);

            // self-attention
            {
                ggml_tensor * wq = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
                ggml_tensor * wk = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());
                ggml_tensor * wv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());

                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx, wq, cur);
                struct ggml_tensor * Kcur = ggml_mul_mat(ctx, wk, cur);
                struct ggml_tensor * Vcur = ggml_mul_mat(ctx, wv, cur);

                Qcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx, cur, inpSA);

            // feed-forward network
            ggml_tensor * ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, ffn_inp, ffn_norm, nullptr, LLM_NORM_RMS);
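            // SwiGLU feed-forward: down(silu(gate(x)) * up(x))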
            ggml_tensor * ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
            ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
            ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
            struct ggml_tensor * tmp = ggml_mul_mat(ctx, ffn_up, cur);
            cur = ggml_mul_mat(ctx, ffn_gate, cur);
            cur = ggml_silu(ctx, cur);
            cur = ggml_mul(ctx, cur, tmp);
            cur = ggml_mul_mat(ctx, ffn_down, cur);

            cur = ggml_add(ctx, cur, ffn_inp);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, nullptr, LLM_NORM_RMS);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);

        return cur;
    }
};
// Falcon
struct test_falcon : public test_llm {
    static constexpr float freq_base   = 10000.0f;
    static constexpr float freq_scale  = 1.0f;
    static constexpr float ext_factor  = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast   = 32.0f;
    static constexpr float beta_slow   = 1.0f;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "FALCON";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    test_falcon(int n_tokens = 1)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 50,
            /*n_head_kv      =*/ 1,
            /*n_rot          =*/ 64,
            /*n_embd_head    =*/ 64,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 1e-5f,
            /*f_norm_rms_eps =*/ 0.f,
            /*n_tokens       =*/ n_tokens,
        }) {
    }
    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);

        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            // norm
            ggml_tensor * attn_norm_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm = llm_build_norm(ctx, inpL, attn_norm_w, attn_norm_b, LLM_NORM);

            // self-attention
            {
                cur = attn_norm;

                ggml_tensor * wqkv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd + 2*hp.n_embd_gqa());

                cur = ggml_mul_mat(ctx, wqkv, cur);

                struct ggml_tensor * Qcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd,       hp.n_tokens, cur->nb[1], 0*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd + hp.n_embd_gqa())));

                Qcur = ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head,    hp.n_tokens);
                Kcur = ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens);

                // using mode = 2 for neox mode
                Qcur = ggml_rope_ext(
                    ctx, Qcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, Kcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }
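            // falcon runs attention and the FFN in parallel: both branches read
            // the same attn_norm output, and both are added to the layer input below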
            struct ggml_tensor * ffn_inp = cur;

            // feed forward
            {
                ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
                ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
                cur = attn_norm;
                cur = ggml_mul_mat(ctx, ffn_up, cur);
                cur = ggml_gelu(ctx, cur);
                cur = ggml_mul_mat(ctx, ffn_down, cur);
            }

            cur = ggml_add(ctx, cur, ffn_inp);
            cur = ggml_add(ctx, cur, inpL);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        ggml_tensor * output_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, output_norm_b, LLM_NORM);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);

        return cur;
    }
};
static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
    std::vector<std::unique_ptr<test_case>> test_cases;
    std::default_random_engine rng(0);

    const ggml_type all_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_BF16,
        GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
        GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0,
        GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
        GGML_TYPE_Q4_K, GGML_TYPE_Q5_K,
        GGML_TYPE_Q6_K,
        GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
        GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
        GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
    };

    const ggml_type base_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16,
        GGML_TYPE_Q4_0,
        GGML_TYPE_Q4_K,
        GGML_TYPE_IQ2_XXS,
    };

    const ggml_type other_types[] = {
        GGML_TYPE_Q4_1,
        GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0,
        GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
        GGML_TYPE_Q5_K,
        GGML_TYPE_Q6_K,
        GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
        GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
        GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
    };
    // unary ops
    for (int v : {0, 1}) {
        for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) {
            test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, { 128, 10, 10, 10 }, v));
            test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, {   7, 13, 19, 23 }, v));
        }
    }

    test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false));
    for (ggml_type type : all_types) {
        for (int b : {1, 7}) {
            for (bool v : {false, true}) {
                test_cases.emplace_back(new test_get_rows(type, 256, 5, 4, b, v));
            }
        }
    }
    for (int b : {1, 7}) {
        for (bool v : {false, true}) {
            test_cases.emplace_back(new test_get_rows(GGML_TYPE_I32, 256, 5, 4, b, v));
        }
    }

    for (ggml_type type_input : {GGML_TYPE_F32}) {
        for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) {
            for (int k0 : {1, 3}) {
                for (int k1 : {1, 3}) {
                    for (int s0 : {1, 2}) {
                        for (int s1 : {1, 2}) {
                            for (int p0 : {0, 1}) {
                                for (int p1 : {0, 1}) {
                                    test_cases.emplace_back(new test_pool2d(pool_type, type_input, {10, 10, 3, 1}, k0, k1, s0, s1, p0, p1));
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32));
    test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16));

    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 2, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 2, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 2}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, {10, 10, 10, 10}, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 10, 10, 10}, {1, 1, 1, 2}));

    test_cases.emplace_back(new test_dup(GGML_TYPE_F32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3}));

    for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
        for (ggml_type type_dst : all_types) {
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
        }
    }

    test_cases.emplace_back(new test_cont());
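    // binary broadcasting ops: ne is the base shape and nr gives per-dimension
    // repeat factors for the other operand, so entries > 1 exercise the
    // broadcast paths of add/mul/div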
    auto add_test_bin_bcast = [&](ggml_type type, std::array<int64_t, 4> ne, std::array<int, 4> nr) {
        for (auto op : {ggml_add, ggml_mul, ggml_div}) {
            test_cases.emplace_back(new test_bin_bcast(op, type, ne, nr));
        }
    };

    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 8, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1, 1}, {32, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 320, 320}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 2, 2, 2});

    // stable diffusion
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 16, 16, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 16, 16, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 16, 1280, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 2560, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 640, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {5120, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {640, 1, 1, 1}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {2, 1, 1, 1});

    test_cases.emplace_back(new test_scale());
    for (float eps : {1e-6f, 1e-5f, 1e-3f, 1e-1f}) {
        test_cases.emplace_back(new test_norm    (GGML_TYPE_F32, {64, 10, 10, 10}, eps));
        test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
    }

    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, { 1,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10,  1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {2, 2}));

            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10,  1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 2}));
        }
    }

    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32}) {
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1}));
        }
    }

    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2, 128, {8, 1}, {1, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2, 128, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2,  64, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2,  64, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64, 45, 128, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45,  64, {8, 1}, {4, 1}));
    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4, 8}) {
                for (int n_used : {1, 2, 4}) {
                    for (bool b : {false, true}) {
                        for (int n : {1, 32}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }

    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4}) {
                for (int n_used : {2}) {
                    for (bool b : {false}) {
                        for (int n : {1}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_sqr());
    test_cases.emplace_back(new test_sqrt());
    test_cases.emplace_back(new test_clamp());

    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10,  1,  1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10,  1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10, 10}, 5));

#if 0
    std::uniform_int_distribution<> dist_ne1(1, 50);
    int exponent = 1;
    while (exponent < (1 << 17)) {
        std::uniform_int_distribution<> dist_ne0(exponent, 2*exponent);

        for (int n = 0; n < 10; ++n) {
            int64_t ne0 = dist_ne0(rng);
            int64_t ne1 = dist_ne1(rng);
            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, n/2 == 0, 0.1f, ne0 < 1000 ? 4.0f : 0.0f));
        }

        exponent <<= 1;
    }
#endif
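    // max_bias > 0 enables ALiBi, which is only applied when a mask is
    // present, so the mask-less + max_bias combination is skipped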
    for (bool mask : {false, true}) {
        for (float max_bias : {0.0f, 8.0f}) {
            if (!mask && max_bias > 0.0f) continue;
            for (float scale : {1.0f, 0.1f}) {
                for (int64_t ne0 : {16, 1024}) {
                    for (int64_t ne1 : {16, 1024}) {
                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0,   ne1,   1, 1}, mask, scale, max_bias));
                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, scale, max_bias));
                    }
                }
            }
        }
    }

    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  0.1f, 8.0f));
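    // rope: fs = freq_scale, ef = ext_factor, af = attn_factor, ff = freq_factors;
    // the `all` flag runs the full sweep of model shapes only for the first
    // parameter combination to keep the number of cases manageable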
    {
        bool all = true;

        for (float v : { 0, 1 }) {
            for (float fs : { 1.0f, 1.4245f }) {
                for (float ef : { 0.0f, 0.7465f }) {
                    for (float af : { 1.0f, 1.4245f }) {
                        for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
                            for (bool ff : {false, true}) { // freq_factors
                                test_cases.emplace_back(new test_rope(type, {128,  32, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 7B

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, {128,  40, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 13B
                                    test_cases.emplace_back(new test_rope(type, {128,  52, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 30B
                                    test_cases.emplace_back(new test_rope(type, {128,  64, 10, 1}, 128, 0, 512, fs, ef, af, ff, v)); // llama 65B
                                }

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, { 64,   1, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64,  71, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64,   8, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 40B)
                                    test_cases.emplace_back(new test_rope(type, { 80,  32, 10, 1},  20, 2, 512, fs, ef, af, ff, v)); // neox (stablelm)
                                    test_cases.emplace_back(new test_rope(type, { 80,  32, 10, 1},  32, 2, 512, fs, ef, af, ff, v)); // neox (phi-2)
                                }

                                test_cases.emplace_back(new test_rope(type, { 64, 128, 10, 1},  64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 40B)
                            }
                        }

                        all = false;
                    }
                }
            }
        }
    }
    for (int v : { 0, 1, 2, 3 }) {
        for (int dim : { 0, 1, 2, 3, }) {
            test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v));
            test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v));
        }
    }

    for (ggml_sort_order order : {GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC}) {
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, { 8,  1,  1,  1}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {60, 10, 10, 10}, order)); // qwen
    }

    test_cases.emplace_back(new test_sum_rows());
    test_cases.emplace_back(new test_upscale());
    test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, {512, 512, 3, 1}, 2, true));
    test_cases.emplace_back(new test_upscale_ext());
    test_cases.emplace_back(new test_group_norm());
    test_cases.emplace_back(new test_acc());
    test_cases.emplace_back(new test_pad());
    test_cases.emplace_back(new test_arange());
    test_cases.emplace_back(new test_timestep_embedding());
    test_cases.emplace_back(new test_leaky_relu());
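    // flash attention: sweep head size, mask/ALiBi, KV length, batch size and
    // quantized KV cache types; mask-less ALiBi is skipped, as above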
    for (int hs : { 64, 80, 128, 256, }) {
        for (bool mask : { true, false } ) {
            for (float max_bias : { 0.0f, 8.0f }) {
                if (!mask && max_bias > 0.0f) continue;
                for (int nh : { 32, }) {
                    for (int kv : { 512, 1024, }) {
                        for (int nb : { 1, 2, 4, 8, }) {
                            for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) {
                                test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, mask, max_bias, type_KV));
                            }
                        }
                    }
                }
            }
        }
    }

    // these tests are disabled to save execution time, but they can be handy for debugging
#if 0
    test_cases.emplace_back(new test_llama(1));
    test_cases.emplace_back(new test_llama(2));
    test_cases.emplace_back(new test_falcon(1));
    test_cases.emplace_back(new test_falcon(2));
#endif
    // run tests
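    // correctness mode: every case is evaluated on the backend under test and
    // on the CPU reference backend, and the outputs are compared within each
    // case's error bounds (see max_nmse_err)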
    if (mode == MODE_TEST) {
        ggml_backend_t backend_cpu = ggml_backend_cpu_init();

        size_t n_ok = 0;
        for (auto & test : test_cases) {
            if (test->eval(backend, backend_cpu, op_name)) {
                n_ok++;
            }
        }
        printf("  %zu/%zu tests passed\n", n_ok, test_cases.size());

        ggml_backend_free(backend_cpu);

        return n_ok == test_cases.size();
    }

    if (mode == MODE_PERF) {
        for (auto & test : test_cases) {
            test->eval_perf(backend, op_name);
        }
        return true;
    }

    GGML_ASSERT(false);
    return false;
}
static void usage(char ** argv) {
    printf("Usage: %s [mode] [-o op] [-b backend]\n", argv[0]);
    printf("    valid modes are: test (compare with CPU backend for correctness) or perf (performance evaluation)\n");
    printf("    op names are as given by ggml_op_desc()\n");
}
int main(int argc, char ** argv) {
    test_mode mode = MODE_TEST;
    const char * op_name_filter = NULL;
    const char * backend_filter = NULL;

    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "test") == 0) {
            mode = MODE_TEST;
        } else if (strcmp(argv[i], "perf") == 0) {
            mode = MODE_PERF;
        } else if (strcmp(argv[i], "-o") == 0) {
            if (i + 1 < argc) {
                op_name_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else if (strcmp(argv[i], "-b") == 0) {
            if (i + 1 < argc) {
                backend_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else {
            usage(argv);
            return 1;
        }
    }
    // enumerate backends
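    // a backend that is skipped (filtered out, or the CPU reference when no
    // filter is given) still counts towards n_ok so the final tally matches
    // the number of registered backends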
  2035. printf("Testing %zu backends\n\n", ggml_backend_reg_get_count());
  2036. size_t n_ok = 0;
  2037. for (size_t i = 0; i < ggml_backend_reg_get_count(); i++) {
  2038. printf("Backend %zu/%zu (%s)\n", i + 1, ggml_backend_reg_get_count(), ggml_backend_reg_get_name(i));
  2039. if (backend_filter != NULL && strcmp(backend_filter, ggml_backend_reg_get_name(i)) != 0) {
  2040. printf(" Skipping\n");
  2041. n_ok++;
  2042. continue;
  2043. }
  2044. ggml_backend_t backend = ggml_backend_reg_init_backend(i, NULL);
  2045. GGML_ASSERT(backend != NULL);
  2046. if (backend_filter == NULL && ggml_backend_is_cpu(backend)) {
  2047. printf(" Skipping CPU backend\n");
  2048. ggml_backend_free(backend);
  2049. n_ok++;
  2050. continue;
  2051. }
  2052. printf(" Backend name: %s\n", ggml_backend_name(backend));
  2053. bool ok = test_backend(backend, mode, op_name_filter);
  2054. printf(" Backend %s: ", ggml_backend_name(backend));
  2055. if (ok) {
  2056. printf("\033[1;32mOK\033[0m\n");
  2057. n_ok++;
  2058. } else {
  2059. printf("\033[1;31mFAIL\033[0m\n");
  2060. }
  2061. printf("\n");
  2062. ggml_backend_free(backend);
  2063. }
  2064. printf("%zu/%zu backends passed\n", n_ok, ggml_backend_reg_get_count());
  2065. if (n_ok != ggml_backend_reg_get_count()) {
  2066. printf("\033[1;31mFAIL\033[0m\n");
  2067. return 1;
  2068. }
  2069. ggml_quantize_free();
  2070. printf("\033[1;32mOK\033[0m\n");
  2071. return 0;
  2072. }