test-backend-ops.cpp

  1. // This file defines tests for various GGML ops and backends.
  2. // For the forward pass it asserts that the results of multiple backends computing the same GGML ops are consistent.
  3. // For the backward pass it asserts that the gradients from backpropagation are consistent
  4. // with the gradients obtained via the method of finite differences ("grad" mode, this is optional).
  5. // It is also possible to check the performance ("perf" mode).
  6. //
  7. // This file has three sections: section 1 does general setup, section 2 defines the GGML ops to be tested,
  8. // and section 3 defines which tests to run.
  9. // Quick start for adding a new GGML op: Go to section 2 and create a struct that inherits from test_case,
  10. // then go to section 3 and add an instantiation of your struct.
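// As a rough sketch (the actual registration code lives in section 3, further down in this file),
// adding the new test usually boils down to a single line such as:
//   test_cases.emplace_back(new test_example(GGML_TYPE_F32, {10, 5, 4, 3}));
// (the container name test_cases is assumed here; see section 3 for the exact spelling).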
  11. // ##############################
  12. // ## Section 1: General Setup ##
  13. // ##############################
  14. #include <ggml.h>
  15. #include <ggml-alloc.h>
  16. #include <ggml-backend.h>
  17. #include <algorithm>
  18. #include <array>
  19. #include <cfloat>
  20. #include <cstdint>
  21. #include <cstring>
  22. #include <cinttypes>
  23. #include <memory>
  24. #include <random>
  25. #include <stdio.h>
  26. #include <stdlib.h>
  27. #include <string>
  28. #include <thread>
  29. #include <future>
  30. #include <vector>
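// Fill a tensor with uniformly distributed random values in [min, max].
// The data is generated in parallel and converted/quantized to the tensor type before being uploaded to the backend.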
  31. static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
  32. size_t nels = ggml_nelements(tensor);
  33. std::vector<float> data(nels);
  34. {
  35. // parallel initialization
  36. static const size_t n_threads = std::thread::hardware_concurrency();
  37. // static RNG initialization (revisit if n_threads stops being constant)
  38. static std::vector<std::default_random_engine> generators = []() {
  39. std::random_device rd;
  40. std::vector<std::default_random_engine> vec;
  41. vec.reserve(n_threads);
  42. //for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
  43. for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
  44. return vec;
  45. }();
  46. auto init_thread = [&](size_t ith, size_t start, size_t end) {
  47. std::uniform_real_distribution<float> distribution(min, max);
  48. auto & gen = generators[ith];
  49. for (size_t i = start; i < end; i++) {
  50. data[i] = distribution(gen);
  51. }
  52. };
  53. std::vector<std::future<void>> tasks;
  54. tasks.reserve(n_threads);
  55. for (size_t i = 0; i < n_threads; i++) {
  56. size_t start = i*nels/n_threads;
  57. size_t end = (i+1)*nels/n_threads;
  58. tasks.push_back(std::async(std::launch::async, init_thread, i, start, end));
  59. }
  60. for (auto & t : tasks) {
  61. t.get();
  62. }
  63. }
  64. if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
  65. ggml_backend_tensor_set(tensor, data.data(), 0, nels * sizeof(float));
  66. } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
  67. GGML_ASSERT(nels % ggml_blck_size(tensor->type) == 0);
  68. // dummy importance matrix
  69. std::vector<float> imatrix(tensor->ne[0], 1.0f);
  70. const float * im = imatrix.data();
  71. if (!ggml_quantize_requires_imatrix(tensor->type)) {
  72. // when the imatrix is optional, we want to test both quantization with and without imatrix
  73. // use one of the random numbers to decide
  74. if (data[0] > 0.5f*(min + max)) {
  75. im = nullptr;
  76. }
  77. }
  78. std::vector<uint8_t> dataq(ggml_row_size(tensor->type, nels));
  79. {
  80. // parallel quantization by block
  81. size_t blck_size = ggml_blck_size(tensor->type);
  82. size_t n_blocks = nels / blck_size;
  83. auto quantize_thread = [&](size_t start, size_t end) {
  84. ggml_quantize_chunk(tensor->type, data.data(), dataq.data(),
  85. start * blck_size, end - start, blck_size, im);
  86. };
  87. const size_t min_blocks_per_thread = 1;
  88. const size_t n_threads = std::max<size_t>(1, std::min<size_t>(std::thread::hardware_concurrency()/2,
  89. std::max<size_t>(1, n_blocks / min_blocks_per_thread))); // at least 1 thread, hardware_concurrency()/2 may be 0
  90. std::vector<std::future<void>> tasks;
  91. tasks.reserve(n_threads);
  92. for (size_t i = 0; i < n_threads; i++) {
  93. size_t start = i*n_blocks/n_threads;
  94. size_t end = (i+1)*n_blocks/n_threads;
  95. tasks.push_back(std::async(std::launch::async, quantize_thread, start, end));
  96. }
  97. for (auto & t : tasks) {
  98. t.get();
  99. }
  100. }
  101. ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
  102. } else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
  103. // This is going to create some weird integers though.
  104. ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
  105. } else if (tensor->type == GGML_TYPE_I64) {
  106. // Integers with a size of 8 bytes can be set by mirroring the float data; the specific values are again not really meaningful.
  107. const size_t nbytes_half = ggml_nbytes(tensor)/2;
  108. ggml_backend_tensor_set(tensor, data.data(), 0*nbytes_half, nbytes_half);
  109. ggml_backend_tensor_set(tensor, data.data(), 1*nbytes_half, nbytes_half);
  110. } else {
  111. GGML_ABORT("fatal error");
  112. }
  113. }
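// Download a tensor from the backend and convert its data to a flat vector of floats
// (dequantizing if necessary) so that results from different backends can be compared element by element.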
  114. static std::vector<float> tensor_to_float(const ggml_tensor * t) {
  115. std::vector<float> tv;
  116. tv.reserve(ggml_nelements(t));
  117. std::vector<uint8_t> buf(ggml_nbytes(t));
  118. ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));
  119. const auto * tt = ggml_get_type_traits(t->type);
  120. size_t bs = ggml_blck_size(t->type);
  121. std::vector<float> vq(ggml_blck_size(t->type));
  122. bool quantized = ggml_is_quantized(t->type);
  123. // access elements by index to avoid gaps in views
  124. for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
  125. for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
  126. for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
  127. for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
  128. size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
  129. if (t->type == GGML_TYPE_F16) {
  130. tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
  131. } else if (t->type == GGML_TYPE_BF16) {
  132. tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
  133. } else if (t->type == GGML_TYPE_F32) {
  134. tv.push_back(*(float *) &buf[i]);
  135. } else if (t->type == GGML_TYPE_I64) {
  136. tv.push_back((float)*(int64_t *) &buf[i]);
  137. } else if (t->type == GGML_TYPE_I32) {
  138. tv.push_back((float)*(int32_t *) &buf[i]);
  139. } else if (t->type == GGML_TYPE_I16) {
  140. tv.push_back((float)*(int16_t *) &buf[i]);
  141. } else if (t->type == GGML_TYPE_I8) {
  142. tv.push_back((float)*(int8_t *) &buf[i]);
  143. } else if (quantized) {
  144. tt->to_float(&buf[i], vq.data(), bs);
  145. tv.insert(tv.end(), vq.begin(), vq.end());
  146. } else {
  147. GGML_ABORT("fatal error");
  148. }
  149. }
  150. }
  151. }
  152. }
  153. return tv;
  154. }
  155. // normalized mean squared error = mse(a, b) / mse(a, 0)
  156. static double nmse(const float * a, const float * b, size_t n) {
  157. double mse_a_b = 0.0;
  158. double mse_a_0 = 0.0;
  159. for (size_t i = 0; i < n; i++) {
  160. float a_i = a[i];
  161. float b_i = b[i];
  162. mse_a_b += (a_i - b_i) * (a_i - b_i);
  163. mse_a_0 += a_i * a_i;
  164. }
  165. return mse_a_b / mse_a_0;
  166. }
  167. // mean absolute asymmetry between a and b
  168. // asymmetry: (a - b) / (a + b)
  169. // This is more stable than relative error if one of the values fluctuates towards zero.
  170. // n: number of values to compare.
  171. // expected_vals: optional vector of expected values for a. If expected_vals is not empty, filter out all comparisons where
  172. // a does not match any of the expected values. Needed for discontinuous gradients where the numerical calculation can fail.
  173. static double mean_abs_asymm(const float * a, const float * b, const size_t n, const std::vector<float> & expected_vals) {
  174. double sum = 0.0f;
  175. size_t nvalid = 0;
  176. for (size_t i = 0; i < n; i++) {
  177. if (!expected_vals.empty()) {
  178. bool matches_any = false;
  179. for (const float & ev : expected_vals) {
  180. if (fabsf(a[i] - ev) < 1e-3f) {
  181. matches_any = true;
  182. break;
  183. }
  184. }
  185. if (!matches_any) {
  186. continue;
  187. }
  188. }
  189. const float asymm = (a[i] - b[i]) / (a[i] + b[i]);
  190. sum += fabsf(asymm);
  191. nvalid++;
  192. }
  193. return sum/nvalid;
  194. }
  195. // utils for printing the variables of the test cases
  196. template<typename T>
  197. static std::string var_to_str(const T & x) {
  198. return std::to_string(x);
  199. }
  200. template<typename T, size_t N>
  201. static std::string var_to_str(const T (&x)[N]) {
  202. std::string s = "[";
  203. for (size_t i = 0; i < N; i++) {
  204. if (i > 0) {
  205. s += ",";
  206. }
  207. s += var_to_str(x[i]);
  208. }
  209. s += "]";
  210. return s;
  211. }
  212. template<typename T, size_t N>
  213. static std::string var_to_str(const std::array<T, N> & x) {
  214. std::string s = "[";
  215. for (size_t i = 0; i < N; i++) {
  216. if (i > 0) {
  217. s += ",";
  218. }
  219. s += var_to_str(x[i]);
  220. }
  221. s += "]";
  222. return s;
  223. }
  224. static std::string var_to_str(ggml_type type) {
  225. return ggml_type_name(type);
  226. }
  227. static std::string var_to_str(ggml_op_pool pool) {
  228. switch (pool) {
  229. case GGML_OP_POOL_AVG: return "avg";
  230. case GGML_OP_POOL_MAX: return "max";
  231. default: return std::to_string(pool);
  232. }
  233. }
  234. #define VAR_TO_STR(x) (#x "=" + var_to_str(x))
  235. #define VARS_TO_STR1(a) VAR_TO_STR(a)
  236. #define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
  237. #define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
  238. #define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
  239. #define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
  240. #define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
  241. #define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
  242. #define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
  243. #define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
  244. #define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
  245. #define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k) VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
  246. #define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
  247. #ifdef GGML_USE_SYCL
  248. static bool inline _isinf(float f) {
  249. return (*(uint32_t *)&f & 0x7fffffff) == 0x7f800000;
  250. }
  251. #else
  252. static bool inline _isinf(float f) { return std::isinf(f); }
  253. #endif
  254. // accept FLT_MAX as infinity
  255. static bool isinf_or_max(float f) {
  256. return _isinf(f) || f == FLT_MAX || f == -FLT_MAX;
  257. }
  258. static bool ggml_is_view_op(enum ggml_op op) {
  259. return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
  260. }
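// The three modes correspond to the checks described at the top of the file:
// forward-pass consistency, performance, and gradient checking.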
  261. enum test_mode {
  262. MODE_TEST,
  263. MODE_PERF,
  264. MODE_GRAD,
  265. };
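// Base class for all op tests: derived structs (section 2) define how to build the graph,
// while the eval/eval_perf/eval_grad drivers below run it in the selected mode.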
  266. struct test_case {
  267. virtual ~test_case() {}
  268. virtual std::string op_desc(ggml_tensor * t) {
  269. return ggml_op_desc(t);
  270. }
  271. virtual std::string vars() {
  272. return "";
  273. }
  274. virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;
  275. virtual double max_nmse_err() {
  276. return 1e-7;
  277. }
  278. virtual double max_maa_err() {
  279. return 1e-4;
  280. }
  281. virtual float grad_eps() {
  282. return 1e-1f;
  283. }
  284. // If false, estimate gradient with 2 points, neglects 3rd order derivative and higher.
  285. // If true, estimate gradient with 4 points, neglects 5th order derivative and higher.
  286. virtual bool grad_precise() {
  287. return false;
  288. }
  289. // Skip gradient checks if total number of gradients to be checked is larger than this (to speed up the tests).
  290. virtual int64_t grad_nmax() {
  291. return 10000;
  292. }
  293. // No effect if empty.
  294. // If not empty, skip all gradient checks where the numerical result does not match any of the values.
  295. // Needed for dealing with discontinuous gradients (e.g. ReLU) where estimation using finite differences is unreliable.
  296. virtual std::vector<float> grad_expect() {
  297. return {};
  298. }
  299. virtual void initialize_tensors(ggml_context * ctx) {
  300. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
  301. init_tensor_uniform(t);
  302. }
  303. }
  304. virtual size_t op_size(ggml_tensor * t) {
  305. size_t size = ggml_nbytes(t);
  306. // add source tensors
  307. for (int i = 0; i < GGML_MAX_SRC; i++) {
  308. if (t->src[i] != NULL) {
  309. size += ggml_nbytes(t->src[i]);
  310. }
  311. }
  312. return size;
  313. }
  314. virtual uint64_t op_flops(ggml_tensor * t) {
  315. GGML_UNUSED(t);
  316. return 0;
  317. }
  318. ggml_cgraph * gf = nullptr;
  319. ggml_cgraph * gb = nullptr;
  320. static const int sentinel_size = 1024;
  321. test_mode mode;
  322. std::vector<ggml_tensor *> sentinels;
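// Sentinel tensors are interleaved with the real tensors; if an op writes out of bounds,
// the sentinel data differs between the two backends and the mismatch is caught in the compare callback.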
  323. void add_sentinel(ggml_context * ctx) {
  324. if (mode == MODE_PERF || mode == MODE_GRAD) {
  325. return;
  326. }
  327. ggml_tensor * sentinel = ::ggml_new_tensor_1d(ctx, GGML_TYPE_F32, sentinel_size);
  328. ggml_format_name(sentinel, "sent_%zu", sentinels.size());
  329. sentinels.push_back(sentinel);
  330. }
  331. // hijack ggml_new_tensor to add sentinels after each tensor to check for overflows in the backend
  332. ggml_tensor * ggml_new_tensor(ggml_context * ctx, ggml_type type, int n_dims, const int64_t * ne) {
  333. ggml_tensor * t = ::ggml_new_tensor(ctx, type, n_dims, ne);
  334. add_sentinel(ctx);
  335. return t;
  336. }
  337. ggml_tensor * ggml_new_tensor_1d(ggml_context * ctx, ggml_type type, int64_t ne0) {
  338. ggml_tensor * t = ::ggml_new_tensor_1d(ctx, type, ne0);
  339. add_sentinel(ctx);
  340. return t;
  341. }
  342. ggml_tensor * ggml_new_tensor_2d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1) {
  343. ggml_tensor * t = ::ggml_new_tensor_2d(ctx, type, ne0, ne1);
  344. add_sentinel(ctx);
  345. return t;
  346. }
  347. ggml_tensor * ggml_new_tensor_3d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) {
  348. ggml_tensor * t = ::ggml_new_tensor_3d(ctx, type, ne0, ne1, ne2);
  349. add_sentinel(ctx);
  350. return t;
  351. }
  352. ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
  353. ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
  354. add_sentinel(ctx);
  355. return t;
  356. }
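// MODE_TEST: build the graph, compute it on both backends, and compare all results (including sentinels).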
  357. bool eval(ggml_backend_t backend1, ggml_backend_t backend2, const char * op_name) {
  358. mode = MODE_TEST;
  359. ggml_init_params params = {
  360. /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
  361. /* .mem_base = */ NULL,
  362. /* .no_alloc = */ true,
  363. };
  364. ggml_context * ctx = ggml_init(params);
  365. GGML_ASSERT(ctx);
  366. gf = ggml_new_graph(ctx);
  367. // pre-graph sentinel
  368. add_sentinel(ctx);
  369. ggml_tensor * out = build_graph(ctx);
  370. if (op_name != nullptr && op_desc(out) != op_name) {
  371. //printf(" %s: skipping\n", op_desc(out).c_str());
  372. ggml_free(ctx);
  373. return true;
  374. }
  375. printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
  376. fflush(stdout);
  377. // check if the backends support the ops
  378. bool supported = true;
  379. for (ggml_backend_t backend : {backend1, backend2}) {
  380. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  381. if (!ggml_backend_supports_op(backend, t)) {
  382. printf("not supported [%s] ", ggml_backend_name(backend));
  383. supported = false;
  384. break;
  385. }
  386. }
  387. }
  388. if (!supported) {
  389. printf("\n");
  390. ggml_free(ctx);
  391. return true;
  392. }
  393. // post-graph sentinel
  394. add_sentinel(ctx);
  395. // allocate
  396. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
  397. if (buf == NULL) {
  398. printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
  399. ggml_free(ctx);
  400. return false;
  401. }
  402. // build graph
  403. ggml_build_forward_expand(gf, out);
  404. // add sentinels as graph nodes so that they are checked in the callback
  405. for (ggml_tensor * sentinel : sentinels) {
  406. ggml_graph_add_node(gf, sentinel);
  407. }
  408. // randomize tensors
  409. initialize_tensors(ctx);
  410. // compare
  411. struct callback_userdata {
  412. bool ok;
  413. double max_err;
  414. ggml_backend_t backend1;
  415. ggml_backend_t backend2;
  416. };
  417. callback_userdata ud {
  418. true,
  419. max_nmse_err(),
  420. backend1,
  421. backend2
  422. };
  423. auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
  424. callback_userdata * ud = (callback_userdata *) user_data;
  425. const char * bn1 = ggml_backend_name(ud->backend1);
  426. const char * bn2 = ggml_backend_name(ud->backend2);
  427. if (t1->op == GGML_OP_NONE) {
  428. // sentinels must be unchanged
  429. std::vector<uint8_t> t1_data(ggml_nbytes(t1));
  430. std::vector<uint8_t> t2_data(ggml_nbytes(t2));
  431. ggml_backend_tensor_get(t1, t1_data.data(), 0, ggml_nbytes(t1));
  432. ggml_backend_tensor_get(t2, t2_data.data(), 0, ggml_nbytes(t2));
  433. if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
  434. printf("sentinel mismatch: %s ", t1->name);
  435. ud->ok = false;
  436. return true;
  437. }
  438. }
  439. std::vector<float> f1 = tensor_to_float(t1);
  440. std::vector<float> f2 = tensor_to_float(t2);
  441. for (size_t i = 0; i < f1.size(); i++) {
  442. // check for nans
  443. if (std::isnan(f1[i]) || std::isnan(f2[i])) {
  444. printf("[%s] NaN at index %zu (%s=%f %s=%f) ", ggml_op_desc(t1), i, bn1, f1[i], bn2, f2[i]);
  445. ud->ok = false;
  446. return true;
  447. }
  448. // check for infs: both must be inf of the same sign, or both must be finite
  449. if (isinf_or_max(f1[i]) || isinf_or_max(f2[i])) {
  450. if (isinf_or_max(f1[i]) && isinf_or_max(f2[i])) {
  451. if (std::signbit(f1[i]) != std::signbit(f2[i])) {
  452. printf("[%s] inf sign mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
  453. ud->ok = false;
  454. return true;
  455. }
  456. } else {
  457. printf("[%s] inf mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
  458. ud->ok = false;
  459. return true;
  460. }
  461. }
  462. }
  463. double err = nmse(f1.data(), f2.data(), f1.size());
  464. if (err > ud->max_err) {
  465. printf("[%s] NMSE = %.9f > %.9f ", ggml_op_desc(t1), err, ud->max_err);
  466. //for (int i = 0; i < (int) f1.size(); i++) {
  467. // printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
  468. //}
  469. //printf("\n");
  470. //exit(1);
  471. ud->ok = false;
  472. }
  473. return true;
  474. GGML_UNUSED(index);
  475. };
  476. const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);
  477. if (!cmp_ok) {
  478. printf("compare failed ");
  479. }
  480. ggml_backend_buffer_free(buf);
  481. ggml_free(ctx);
  482. if (ud.ok && cmp_ok) {
  483. printf("\033[1;32mOK\033[0m\n");
  484. return true;
  485. }
  486. printf("\033[1;31mFAIL\033[0m\n");
  487. return false;
  488. }
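// MODE_PERF: run the op repeatedly on a single backend and report time per run plus FLOPS or memory bandwidth.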
  489. bool eval_perf(ggml_backend_t backend, const char * op_name) {
  490. mode = MODE_PERF;
  491. static const size_t graph_nodes = 8192;
  492. ggml_init_params params = {
  493. /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
  494. /* .mem_base = */ NULL,
  495. /* .no_alloc = */ true,
  496. };
  497. ggml_context * ctx = ggml_init(params);
  498. GGML_ASSERT(ctx);
  499. ggml_tensor * out = build_graph(ctx);
  500. if (op_name != nullptr && op_desc(out) != op_name) {
  501. //printf(" %s: skipping\n", op_desc(out).c_str());
  502. ggml_free(ctx);
  503. return true;
  504. }
  505. int len = printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
  506. fflush(stdout);
  507. // check if backends support op
  508. if (!ggml_backend_supports_op(backend, out)) {
  509. printf("not supported\n");
  510. ggml_free(ctx);
  511. return true;
  512. }
  513. // align while also leaving some margin for variations in parameters
  514. int align = 8;
  515. int last = (len + align - 1) / align * align;
  516. if (last - len < 5) {
  517. last += align;
  518. }
  519. printf("%*s", last - len, "");
  520. // allocate
  521. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
  522. if (buf == NULL) {
  523. printf("failed to allocate tensors\n");
  524. ggml_free(ctx);
  525. return false;
  526. }
  527. // randomize tensors
  528. initialize_tensors(ctx);
  529. // build graph
  530. ggml_cgraph * gf = ggml_new_graph_custom(ctx, graph_nodes, false);
  531. ggml_build_forward_expand(gf, out);
  532. // warmup run
  533. ggml_backend_graph_compute(backend, gf);
  534. // determine number of runs
  535. int n_runs;
  536. bool is_cpu = ggml_backend_dev_type(ggml_backend_get_device(backend)) == GGML_BACKEND_DEVICE_TYPE_CPU;
  537. if (op_flops(out) > 0) {
  538. // based on flops
  539. const uint64_t GFLOP = 1000 * 1000 * 1000;
  540. const uint64_t target_flops_cpu = 8ULL * GFLOP;
  541. const uint64_t target_flops_gpu = 100ULL * GFLOP;
  542. uint64_t target_flops = is_cpu ? target_flops_cpu : target_flops_gpu;
  543. n_runs = std::min<int>(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_flops / op_flops(out)) + 1;
  544. } else {
  545. // based on memory size
  546. const size_t GB = 1ULL << 30;
  547. const size_t target_size_cpu = 8 * GB;
  548. const size_t target_size_gpu = 32 * GB;
  549. size_t target_size = is_cpu ? target_size_cpu : target_size_gpu;
  550. n_runs = std::min<int>(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_size / op_size(out)) + 1;
  551. }
  552. // duplicate the op
  553. for (int i = 1; i < n_runs; i++) {
  554. ggml_graph_add_node(gf, out);
  555. }
  556. // calculate memory
  557. size_t mem = n_runs * op_size(out);
  558. auto tensor_op_size = [](ggml_tensor * t) {
  559. size_t size = ggml_nbytes(t);
  560. // add source tensors
  561. for (int i = 0; i < GGML_MAX_SRC; i++) {
  562. if (t->src[i] != NULL) {
  563. size += ggml_nbytes(t->src[i]);
  564. }
  565. }
  566. return size;
  567. };
  568. for (int i = 0; i < ggml_graph_n_nodes(gf); ++i) {
  569. if (ggml_is_view_op(ggml_graph_node(gf, i)->op) || ggml_graph_node(gf, i) == out) {
  570. continue;
  571. }
  572. mem += tensor_op_size(ggml_graph_node(gf, i));
  573. }
  574. // run
  575. int64_t total_time_us = 0;
  576. int64_t total_mem = 0;
  577. int total_runs = 0;
  578. do {
  579. int64_t start_time = ggml_time_us();
  580. ggml_backend_graph_compute(backend, gf);
  581. int64_t end_time = ggml_time_us();
  582. total_time_us += end_time - start_time;
  583. total_mem += mem;
  584. total_runs += n_runs;
  585. } while (total_time_us < 1000*1000); // run for at least 1 second
  586. printf(" %8d runs - %8.2f us/run - ",
  587. total_runs,
  588. (double)total_time_us / total_runs);
  589. if (op_flops(out) > 0) {
  590. double flops_per_sec = (op_flops(out) * total_runs) / (total_time_us / 1e6);
  591. auto format_flops = [](double flops) -> std::string {
  592. char buf[256];
  593. if (flops >= 1e12) {
  594. snprintf(buf, sizeof(buf), "%6.2f TFLOP", flops / 1e12);
  595. } else if (flops >= 1e9) {
  596. snprintf(buf, sizeof(buf), "%6.2f GFLOP", flops / 1e9);
  597. } else if (flops >= 1e6) {
  598. snprintf(buf, sizeof(buf), "%6.2f MFLOP", flops / 1e6);
  599. } else {
  600. snprintf(buf, sizeof(buf), "%6.2f KFLOP", flops / 1e3);
  601. }
  602. return buf;
  603. };
  604. printf("%s/run - \033[1;34m%sS\033[0m",
  605. format_flops(op_flops(out)).c_str(),
  606. format_flops(flops_per_sec).c_str());
  607. } else {
  608. printf("%8zu kB/run - \033[1;34m%7.2f GB/s\033[0m",
  609. op_size(out) / 1024,
  610. total_mem / (total_time_us / 1e6) / 1024.0 / 1024.0 / 1024.0);
  611. }
  612. printf("\n");
  613. ggml_backend_buffer_free(buf);
  614. ggml_free(ctx);
  615. return true;
  616. }
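// MODE_GRAD: compare the gradients produced by the backward pass against numerical gradients
// estimated with finite differences (see grad_eps/grad_precise/grad_expect above).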
  617. bool eval_grad(ggml_backend_t backend, const char * op_name) {
  618. mode = MODE_GRAD;
  619. const std::vector<float> expect = grad_expect();
  620. ggml_init_params params = {
  621. /* .mem_size = */ ggml_tensor_overhead()*128 + 2*ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, true),
  622. /* .mem_base = */ NULL,
  623. /* .no_alloc = */ true,
  624. };
  625. ggml_context * ctx = ggml_init(params);
  626. GGML_ASSERT(ctx);
  627. gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
  628. gb = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
  629. ggml_tensor * out = build_graph(ctx);
  630. if ((op_name != nullptr && op_desc(out) != op_name) || out->op == GGML_OP_OPT_STEP_ADAMW) {
  631. //printf(" %s: skipping\n", op_desc(out).c_str());
  632. ggml_free(ctx);
  633. return true;
  634. }
  635. printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
  636. fflush(stdout);
  637. if (out->type != GGML_TYPE_F32) {
  638. ggml_free(ctx);
  639. printf("not supported [%s->type != FP32]\n", out->name);
  640. return true;
  641. }
  642. // check if the backend supports the ops
  643. bool supported = true;
  644. bool any_params = false;
  645. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  646. if (!ggml_backend_supports_op(backend, t)) {
  647. printf("not supported [%s] ", ggml_backend_name(backend));
  648. supported = false;
  649. break;
  650. }
  651. if ((t->flags & GGML_TENSOR_FLAG_PARAM)) {
  652. any_params = true;
  653. if (t->type != GGML_TYPE_F32) {
  654. printf("not supported [%s->type != FP32] ", t->name);
  655. supported = false;
  656. break;
  657. }
  658. }
  659. }
  660. if (!any_params) {
  661. printf("not supported [%s] \n", op_name);
  662. supported = false;
  663. }
  664. if (!supported) {
  665. printf("\n");
  666. ggml_free(ctx);
  667. return true;
  668. }
  669. int64_t ngrads = 0;
  670. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  671. if (t->flags & GGML_TENSOR_FLAG_PARAM) {
  672. ngrads += ggml_nelements(t);
  673. }
  674. }
  675. if (ngrads > grad_nmax()) {
  676. printf("skipping large tensors for speed \n");
  677. ggml_free(ctx);
  678. return true;
  679. }
  680. if (!ggml_is_scalar(out)) {
  681. out = ggml_sum(ctx, out);
  682. ggml_set_name(out, "sum_of_out");
  683. }
  684. ggml_set_loss(out);
  685. ggml_build_forward_expand(gf, out);
  686. ggml_graph_cpy(gf, gb);
  687. ggml_build_backward_expand(ctx, ctx, gb, false);
  688. if (expect.size() != 1 || expect[0] != 0.0f) {
  689. GGML_ASSERT(ggml_graph_n_nodes(gb) > ggml_graph_n_nodes(gf));
  690. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  691. GGML_ASSERT(!(t->flags & GGML_TENSOR_FLAG_PARAM) || ggml_graph_get_grad(gb, t)->op != GGML_OP_NONE);
  692. }
  693. }
  694. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  695. if (!ggml_backend_supports_op(backend, t)) {
  696. printf("not supported [%s] ", ggml_backend_name(backend));
  697. supported = false;
  698. break;
  699. }
  700. if ((t->flags & GGML_TENSOR_FLAG_PARAM) && t->type != GGML_TYPE_F32) {
  701. printf("not supported [%s->type != FP32] ", t->name);
  702. supported = false;
  703. break;
  704. }
  705. }
  706. if (!supported) {
  707. printf("\n");
  708. ggml_free(ctx);
  709. return true;
  710. }
  711. // allocate
  712. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
  713. if (buf == NULL) {
  714. printf("failed to allocate tensors [%s] ", ggml_backend_name(backend));
  715. ggml_free(ctx);
  716. return false;
  717. }
  718. initialize_tensors(ctx); // Randomizes all tensors (including gradients).
  719. ggml_graph_reset(gb); // Sets gradients to 1 if loss, 0 otherwise.
  720. ggml_backend_graph_compute(backend, gf);
  721. ggml_backend_graph_compute(backend, gb);
  722. bool ok = true;
  723. for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
  724. if (!(t->flags & GGML_TENSOR_FLAG_PARAM)) {
  725. continue;
  726. }
  727. const char * bn = ggml_backend_name(backend);
  728. const int64_t ne = ggml_nelements(t);
  729. std::vector<float> ga;
  730. struct ggml_tensor * grad = ggml_graph_get_grad(gb, t);
  731. if (grad) {
  732. ga = tensor_to_float(grad);
  733. } else {
  734. ga.resize(ne); // default value is 0.0f
  735. }
  736. for (int64_t i = 0; i < ne; ++i) { // gradient algebraic
  737. // check for nans
  738. if (!std::isfinite(ga[i])) {
  739. printf("[%s] nonfinite gradient at index %" PRId64 " (%s=%f) ", ggml_op_desc(t), i, bn, ga[i]);
  740. ok = false;
  741. break;
  742. }
  743. }
  744. if (!ok) {
  745. break;
  746. }
  747. std::vector<float> gn(ne); // gradient numeric
  748. GGML_ASSERT(ga.size() == gn.size());
  749. std::vector<float> x0 = tensor_to_float(t); // original t data
  750. GGML_ASSERT(ggml_is_scalar(out));
  751. GGML_ASSERT(out->type == GGML_TYPE_F32);
  752. const float eps = grad_eps();
  753. for (int64_t i = 0; i < ne; ++i) {
  754. const float xiu = x0[i] + 1.0f*eps; // x, index i, up
  755. const float xiuh = x0[i] + 0.5f*eps; // x, index i, up half
  756. const float xidh = x0[i] - 0.5f*eps; // x, index i, down half
  757. const float xid = x0[i] - 1.0f*eps; // x, index i, down
  758. float fu, fuh, fdh, fd; // output values for xiu, xiuh, xidh, xid
  759. ggml_backend_tensor_set(t, &xiu, i*sizeof(float), sizeof(float));
  760. ggml_backend_graph_compute(backend, gf);
  761. ggml_backend_tensor_get(out, &fu, 0, ggml_nbytes(out));
  762. ggml_backend_tensor_set(t, &xid, i*sizeof(float), sizeof(float));
  763. ggml_backend_graph_compute(backend, gf);
  764. ggml_backend_tensor_get(out, &fd, 0, ggml_nbytes(out));
  765. if (grad_precise()) {
  766. ggml_backend_tensor_set(t, &xiuh, i*sizeof(float), sizeof(float));
  767. ggml_backend_graph_compute(backend, gf);
  768. ggml_backend_tensor_get(out, &fuh, 0, ggml_nbytes(out));
  769. ggml_backend_tensor_set(t, &xidh, i*sizeof(float), sizeof(float));
  770. ggml_backend_graph_compute(backend, gf);
  771. ggml_backend_tensor_get(out, &fdh, 0, ggml_nbytes(out));
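// 5-point stencil with step eps/2, error O(eps^4):
// f'(x) ~= (8*f(x+eps/2) - 8*f(x-eps/2) + f(x-eps) - f(x+eps)) / (6*eps)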
  772. gn[i] = (8.0*(double)fuh + (double)fd - (8.0*(double)fdh + (double)fu)) / (6.0*(double)eps);
  773. } else {
  774. gn[i] = (fu - fd) / (2.0f*eps);
  775. }
  776. ggml_backend_tensor_set(t, x0.data(), 0, ggml_nbytes(t));
  777. }
  778. const double err = mean_abs_asymm(gn.data(), ga.data(), gn.size(), expect);
  779. if (err > max_maa_err()) {
  780. printf("[%s] MAA = %.9f > %.9f ", ggml_op_desc(t), err, max_maa_err());
  781. ok = false;
  782. break;
  783. }
  784. if (!ok) {
  785. break;
  786. }
  787. }
  788. if (!ok) {
  789. printf("compare failed ");
  790. }
  791. ggml_backend_buffer_free(buf);
  792. ggml_free(ctx);
  793. if (ok) {
  794. printf("\033[1;32mOK\033[0m\n");
  795. return true;
  796. }
  797. printf("\033[1;31mFAIL\033[0m\n");
  798. return false;
  799. }
  800. };
  801. // ###################################
  802. // ## Section 2: GGML Op Definitions ##
  803. // ###################################
  804. // The following is an example showing the bare minimum for creating a test for a GGML op.
  805. // GGML_OP_EXAMPLE
  806. struct test_example : public test_case {
  807. // Always define these 2 or variants thereof:
  808. const ggml_type type; // The type of the input tensors.
  809. const std::array<int64_t, 4> ne; // The shape of the input tensors.
  810. // For some ops it's necessary to define multiple types or shapes for the inputs.
  811. // Or they may need additional parameters.
  812. // Put all parameters needed to fully define the test into one of the VARS_TO_STR macros.
  813. // In most cases these are just the properties of the struct that you defined above.
  814. // This is needed for info prints.
  815. std::string vars() override {
  816. return VARS_TO_STR2(type, ne);
  817. }
  818. // Define a constructor for the struct.
  819. // In most cases it will be sufficient to have the same arguments as the struct has properties
  820. // and just use initializer lists.
  821. test_example(ggml_type type = GGML_TYPE_F32,
  822. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  823. : type(type), ne(ne) {}
  824. // Define how a simple GGML compute graph can be constructed for the new GGML op.
  825. ggml_tensor * build_graph(ggml_context * ctx) override {
  826. // Step 1: create input tensors that don't depend on any other tensors:
  827. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  828. ggml_set_name(a, "a"); // Setting names is optional but it's useful for debugging.
  829. ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
  830. ggml_set_name(b, "b");
  831. // Step 2: use the op that you want to test in the GGML compute graph.
  832. ggml_tensor * out = ggml_add(ctx, a, b); // For this example we're just doing a simple addition.
  833. ggml_set_name(out, "out");
  834. // Step 3: return the output tensor.
  835. return out;
  836. }
  837. // In order to also check the gradients for your op, add calls like ggml_set_param(ctx, a)
  838. // immediately after you create the tensors.
  839. // This is optional and only makes sense if a backward pass has actually been implemented for the new op.
  840. };
  841. // GGML_OP_UNARY
  842. struct test_unary : public test_case {
  843. const ggml_unary_op op;
  844. const ggml_type type;
  845. const std::array<int64_t, 4> ne_a;
  846. int v; // view (1 : non-contiguous a)
  847. std::string vars() override {
  848. return VARS_TO_STR3(type, ne_a, v);
  849. }
  850. test_unary(ggml_unary_op op,
  851. ggml_type type = GGML_TYPE_F32,
  852. std::array<int64_t, 4> ne_a = {128, 2, 2, 2},
  853. int v = 0)
  854. : op(op), type(type), ne_a(ne_a), v(v) {}
  855. ggml_tensor * build_graph(ggml_context * ctx) override {
  856. const bool grad_supported = op == GGML_UNARY_OP_ABS || op == GGML_UNARY_OP_SGN || op == GGML_UNARY_OP_NEG ||
  857. op == GGML_UNARY_OP_STEP || op == GGML_UNARY_OP_RELU || op == GGML_UNARY_OP_SILU;
  858. ggml_tensor * a;
  859. if (v & 1) {
  860. auto ne = ne_a; ne[0] *= 3;
  861. a = ggml_new_tensor(ctx, type, 4, ne.data());
  862. if (grad_supported) {
  863. ggml_set_param(ctx, a);
  864. }
  865. ggml_set_name(a, "a");
  866. a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
  867. ggml_set_name(a, "view_of_a");
  868. } else {
  869. a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  870. if (grad_supported) {
  871. ggml_set_param(ctx, a);
  872. }
  873. ggml_set_name(a, "a");
  874. }
  875. ggml_tensor * out = ggml_unary(ctx, a, op);
  876. ggml_set_name(out, "out");
  877. return out;
  878. }
  879. void initialize_tensors(ggml_context * ctx) override {
  880. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  881. // test extended range of values to check for NaNs in GELU
  882. init_tensor_uniform(t, -150.f, 150.f);
  883. }
  884. }
  885. float grad_eps() override {
  886. return 15.0f;
  887. }
  888. std::vector<float> grad_expect() override {
  889. if (op == GGML_UNARY_OP_ABS) {
  890. return {-1.0f, 1.0f};
  891. }
  892. if (op == GGML_UNARY_OP_SGN || op == GGML_UNARY_OP_STEP) {
  893. return {0.0f};
  894. }
  895. if (op == GGML_UNARY_OP_RELU) {
  896. return {0.0f, 1.0f};
  897. }
  898. return {};
  899. }
  900. };
  901. // GGML_OP_GET_ROWS
  902. struct test_get_rows : public test_case {
  903. const ggml_type type;
  904. const int n; // cols
  905. const int m; // rows
  906. const int r; // rows to get
  907. const int b; // batch size
  908. const bool v; // view (non-contiguous src1)
  909. std::string vars() override {
  910. return VARS_TO_STR6(type, n, m, r, b, v);
  911. }
  912. test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
  913. : type(type), n(n), m(m), r(r), b(b), v(v) {}
  914. ggml_tensor * build_graph(ggml_context * ctx) override {
  915. ggml_tensor * in = ggml_new_tensor_3d(ctx, type, n, m, b);
  916. ggml_set_name(in, "in");
  917. ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
  918. ggml_set_name(rows, "rows");
  919. if (v) {
  920. rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
  921. ggml_set_name(rows, "view_of_rows");
  922. }
  923. const bool grad_supported = ggml_is_matrix(in) && ggml_is_vector(rows);
  924. if (grad_supported) {
  925. ggml_set_param(ctx, in);
  926. // rows is a constant input -> no gradients
  927. }
  928. ggml_tensor * out = ggml_get_rows(ctx, in, rows);
  929. ggml_set_name(out, "out");
  930. return out;
  931. }
  932. void initialize_tensors(ggml_context * ctx) override {
  933. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  934. if (t->type == GGML_TYPE_I32) {
  935. if (ggml_is_view_op(t->op)) { continue; }
  936. // rows
  937. std::vector<int> data(r*b);
  938. for (int i = 0; i < r*b; i++) {
  939. data[i] = rand() % m;
  940. }
  941. ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
  942. } else {
  943. init_tensor_uniform(t);
  944. }
  945. }
  946. }
  947. };
  948. // GGML_OP_ARGMAX
  949. struct test_argmax : public test_case {
  950. const ggml_type type;
  951. const std::array<int64_t, 4> ne;
  952. std::string vars() override {
  953. return VARS_TO_STR2(type, ne);
  954. }
  955. test_argmax(ggml_type type = GGML_TYPE_F32,
  956. std::array<int64_t, 4> ne = {10, 100, 1, 1})
  957. : type(type), ne(ne) {}
  958. ggml_tensor * build_graph(ggml_context * ctx) override {
  959. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  960. ggml_set_name(a, "a");
  961. ggml_tensor * out = ggml_argmax(ctx, a);
  962. ggml_set_name(out, "out");
  963. return out;
  964. }
  965. void initialize_tensors(ggml_context * ctx) override {
  966. std::random_device rd;
  967. std::default_random_engine rng(rd());
  968. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  969. if (t->type == GGML_TYPE_F32) {
  970. // initialize with unique values to avoid ties
  971. for (int64_t r = 0; r < ggml_nrows(t); r++) {
  972. std::vector<float> data(t->ne[0]);
  973. for (int i = 0; i < t->ne[0]; i++) {
  974. data[i] = i;
  975. }
  976. std::shuffle(data.begin(), data.end(), rng);
  977. ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
  978. }
  979. } else {
  980. init_tensor_uniform(t);
  981. }
  982. }
  983. }
  984. double max_nmse_err() override {
  985. return 0.0;
  986. }
  987. };
  988. // GGML_OP_COUNT_EQUAL
  989. struct test_count_equal : public test_case {
  990. const ggml_type type;
  991. const std::array<int64_t, 4> ne;
  992. std::string vars() override {
  993. return VARS_TO_STR2(type, ne);
  994. }
  995. test_count_equal(ggml_type type = GGML_TYPE_F32,
  996. std::array<int64_t, 4> ne = {4, 500, 1, 1})
  997. : type(type), ne(ne) {}
  998. ggml_tensor * build_graph(ggml_context * ctx) override {
  999. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1000. ggml_set_name(a, "a");
  1001. ggml_tensor * a_argmax = ggml_argmax(ctx, a);
  1002. ggml_set_name(a_argmax, "a_argmax");
  1003. ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
  1004. ggml_set_name(b, "b");
  1005. ggml_tensor * b_argmax = ggml_argmax(ctx, b);
  1006. ggml_set_name(b_argmax, "b_argmax");
  1007. ggml_tensor * out = ggml_count_equal(ctx, a_argmax, b_argmax);
  1008. ggml_set_name(out, "out");
  1009. return out;
  1010. }
  1011. double max_nmse_err() override {
  1012. return 0.0;
  1013. }
  1014. };
  1015. // GGML_OP_REPEAT
  1016. struct test_repeat : public test_case {
  1017. const ggml_type type;
  1018. const std::array<int64_t, 4> ne;
  1019. const std::array<int, 4> nr;
  1020. std::string vars() override {
  1021. return VARS_TO_STR3(type, ne, nr);
  1022. }
  1023. size_t op_size(ggml_tensor * t) override {
  1024. return ggml_nbytes(t) * 2;
  1025. }
  1026. test_repeat(ggml_type type = GGML_TYPE_F32,
  1027. std::array<int64_t, 4> ne = {10, 5, 4, 3},
  1028. std::array<int, 4> nr = {2, 2, 2, 2})
  1029. : type(type), ne(ne), nr(nr) {}
  1030. ggml_tensor * build_graph(ggml_context * ctx) override {
  1031. ggml_tensor * target = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
  1032. ggml_set_name(target, "target");
  1033. ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
  1034. ggml_set_param(ctx, src);
  1035. ggml_set_name(src, "src");
  1036. ggml_tensor * out = ggml_repeat(ctx, src, target);
  1037. ggml_set_name(out, "out");
  1038. return out;
  1039. }
  1040. };
  1041. // GGML_OP_DUP
  1042. struct test_dup : public test_case {
  1043. const ggml_type type;
  1044. const std::array<int64_t, 4> ne;
  1045. const std::array<int64_t, 4> permute;
  1046. bool _use_permute;
  1047. std::string vars() override {
  1048. std::string v = VARS_TO_STR2(type, ne);
  1049. if (_use_permute) v += "," + VAR_TO_STR(permute);
  1050. return v;
  1051. }
  1052. test_dup(ggml_type type = GGML_TYPE_F32,
  1053. std::array<int64_t, 4> ne = {10, 10, 20, 1},
  1054. std::array<int64_t, 4> permute = {0, 0, 0, 0})
  1055. : type(type), ne(ne), permute(permute),
  1056. _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}
  1057. ggml_tensor * build_graph(ggml_context * ctx) override {
  1058. ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
  1059. ggml_set_param(ctx, src);
  1060. ggml_set_name(src, "src");
  1061. if (_use_permute) {
  1062. src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
  1063. ggml_set_name(src, "src_permuted");
  1064. }
  1065. ggml_tensor * out = ggml_dup(ctx, src);
  1066. ggml_set_name(out, "out");
  1067. return out;
  1068. }
  1069. };
  1070. // GGML_OP_SET
  1071. struct test_set : public test_case {
  1072. const ggml_type type_src;
  1073. const ggml_type type_dst;
  1074. const std::array<int64_t, 4> ne;
  1075. const int dim;
  1076. std::string vars() override {
  1077. return VARS_TO_STR4(type_src, type_dst, ne, dim);
  1078. }
  1079. size_t op_size(ggml_tensor * t) override {
  1080. return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
  1081. }
  1082. test_set(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
  1083. std::array<int64_t, 4> ne = {6, 5, 4, 3}, int dim = 1)
  1084. : type_src(type_src), type_dst(type_dst), ne(ne), dim(dim) {}
  1085. ggml_tensor * build_graph(ggml_context * ctx) override {
  1086. ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
  1087. ggml_set_param(ctx, src);
  1088. ggml_set_name(src, "src");
  1089. auto ne_dst = ne;
  1090. for (int i = 0; i < dim; ++i) {
  1091. ne_dst[i] *= 2;
  1092. }
1093. ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, ne_dst.data());
  1094. ggml_set_param(ctx, dst);
  1095. ggml_set_name(dst, "dst");
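// Center src inside dst: along each doubled dimension, skip half of the extra elements (offset in bytes).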
  1096. size_t offset = 0;
  1097. for (int i = 0; i < dim; ++i) {
  1098. offset += ((ne_dst[i] - ne[i])/2)*dst->nb[i];
  1099. }
  1100. ggml_tensor * out = ggml_set(ctx, dst, src,
  1101. // The backward pass requires setting a contiguous region:
  1102. src->nb[1], src->nb[2], src->nb[3], offset);
  1103. ggml_set_name(out, "out");
  1104. return out;
  1105. }
  1106. };
  1107. // GGML_OP_CPY
  1108. struct test_cpy : public test_case {
  1109. const ggml_type type_src;
  1110. const ggml_type type_dst;
  1111. const std::array<int64_t, 4> ne;
  1112. const std::array<int64_t, 4> permute;
  1113. bool _src_use_permute;
  1114. std::string vars() override {
  1115. return VARS_TO_STR4(type_src, type_dst, ne, permute);
  1116. }
  1117. double max_nmse_err() override {
  1118. return 1e-6;
  1119. }
  1120. size_t op_size(ggml_tensor * t) override {
  1121. return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
  1122. }
  1123. test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
  1124. std::array<int64_t, 4> ne = {10, 10, 10, 1},
  1125. std::array<int64_t, 4> permute = {0, 0, 0, 0})
  1126. : type_src(type_src), type_dst(type_dst), ne(ne), permute(permute),
  1127. _src_use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}
  1128. ggml_tensor * build_graph(ggml_context * ctx) override {
  1129. ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
  1130. ggml_set_param(ctx, src);
  1131. ggml_set_name(src, "src");
  1132. if (_src_use_permute) {
  1133. src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
  1134. ggml_set_name(src, "src_permuted");
  1135. }
1136. ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, src->ne);
  1137. ggml_set_name(dst, "dst");
  1138. ggml_tensor * out = ggml_cpy(ctx, src, dst);
  1139. ggml_set_name(out, "out");
  1140. return out;
  1141. }
  1142. };
  1143. // GGML_OP_CONT
  1144. struct test_cont : public test_case {
  1145. const ggml_type type;
  1146. const std::array<int64_t, 4> ne;
  1147. std::string vars() override {
  1148. return VARS_TO_STR2(type, ne);
  1149. }
  1150. test_cont(ggml_type type = GGML_TYPE_F32,
  1151. std::array<int64_t, 4> ne = {10, 10, 10, 1})
  1152. : type(type), ne(ne) {}
  1153. ggml_tensor * build_graph(ggml_context * ctx) override {
  1154. ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
  1155. ggml_set_param(ctx, src);
  1156. ggml_set_name(src, "src");
  1157. src = ggml_transpose(ctx, src);
  1158. ggml_set_name(src, "src_transposed");
  1159. ggml_tensor * out = ggml_cont(ctx, src);
  1160. ggml_set_name(out, "out");
  1161. return out;
  1162. }
  1163. };
  1164. // GGML_OP_ADD
  1165. // GGML_OP_MUL
  1166. // GGML_OP_DIV
  1167. struct test_bin_bcast : public test_case {
  1168. using op_t = ggml_tensor * (*) (ggml_context *, ggml_tensor *, ggml_tensor *);
  1169. op_t op;
  1170. const ggml_type type;
  1171. const std::array<int64_t, 4> ne;
  1172. const std::array<int, 4> nr;
  1173. std::string vars() override {
  1174. return VARS_TO_STR3(type, ne, nr);
  1175. }
  1176. size_t op_size(ggml_tensor * t) override {
  1177. return ggml_nbytes(t) * 3;
  1178. }
  1179. test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32,
  1180. std::array<int64_t, 4> ne = {10, 10, 1, 1},
  1181. std::array<int, 4> nr = {1, 2, 1, 1})
  1182. : op(op), type(type), ne(ne), nr(nr) {}
  1183. ggml_tensor * build_graph(ggml_context * ctx) override {
  1184. ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
  1185. ggml_set_name(a, "a");
  1186. ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
  1187. ggml_set_name(b, "b");
  1188. // The backward pass supports broadcasting only for GGML_ADD:
  1189. const bool grad_supported = op == ggml_add || ggml_are_same_shape(a, b);
  1190. if (grad_supported) {
  1191. ggml_set_param(ctx, a);
  1192. ggml_set_param(ctx, b);
  1193. }
  1194. ggml_tensor * out = op(ctx, a, b);
  1195. ggml_set_name(out, "out");
  1196. return out;
  1197. }
  1198. void initialize_tensors(ggml_context * ctx) override {
  1199. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1200. if (op == ggml_mul || op == ggml_div) {
  1201. // MUL and DIV have numerical issues around zero:
  1202. init_tensor_uniform(t, 0.9f, 1.1f);
  1203. } else {
  1204. init_tensor_uniform(t);
  1205. }
  1206. }
  1207. }
  1208. float grad_eps() override {
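// For MUL the sum used for the numerical gradient scales with the number of elements, so scale eps accordingly.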
  1209. return 0.1f * (op == ggml_mul ? ne[0]*ne[1]*ne[2]*ne[3] : 1);
  1210. }
  1211. bool grad_precise() override {
  1212. return op == ggml_div;
  1213. }
  1214. double max_maa_err() override {
  1215. return op == ggml_add ? 1e-4 : 1e-3;
  1216. }
  1217. };
  1218. // GGML_OP_ADD1
  1219. struct test_add1 : public test_case {
  1220. const ggml_type type;
  1221. const std::array<int64_t, 4> ne;
  1222. std::string vars() override {
  1223. return VARS_TO_STR2(type, ne);
  1224. }
  1225. test_add1(ggml_type type = GGML_TYPE_F32,
  1226. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  1227. : type(type), ne(ne) {}
  1228. ggml_tensor * build_graph(ggml_context * ctx) override {
  1229. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1230. ggml_set_param(ctx, a);
  1231. ggml_set_name(a, "a");
  1232. ggml_tensor * b = ggml_new_tensor_1d(ctx, type, 1);
  1233. // ggml_set_param(ctx, b); // TODO: implement
  1234. ggml_set_name(b, "b");
  1235. ggml_tensor * out = ggml_add1(ctx, a, b);
  1236. ggml_set_name(out, "out");
  1237. return out;
  1238. }
  1239. float grad_eps() override {
  1240. return 0.1f * ne[0]*ne[1]*ne[2]*ne[3];
  1241. }
  1242. };
  1243. // GGML_OP_SCALE
  1244. struct test_scale : public test_case {
  1245. const ggml_type type;
  1246. const std::array<int64_t, 4> ne;
  1247. float scale;
  1248. std::string vars() override {
  1249. return VARS_TO_STR3(type, ne, scale);
  1250. }
  1251. test_scale(ggml_type type = GGML_TYPE_F32,
  1252. std::array<int64_t, 4> ne = {10, 10, 10, 10},
  1253. float scale = 2.0f)
  1254. : type(type), ne(ne), scale(scale) {}
  1255. ggml_tensor * build_graph(ggml_context * ctx) override {
  1256. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1257. ggml_set_param(ctx, a);
  1258. ggml_set_name(a, "a");
  1259. ggml_tensor * out = ggml_scale(ctx, a, scale);
  1260. ggml_set_name(out, "out");
  1261. return out;
  1262. }
  1263. };
  1264. // GGML_OP_NORM
  1265. struct test_norm : public test_case {
  1266. const ggml_type type;
  1267. const std::array<int64_t, 4> ne;
  1268. float eps;
  1269. std::string vars() override {
  1270. return VARS_TO_STR3(type, ne, eps);
  1271. }
  1272. test_norm(ggml_type type = GGML_TYPE_F32,
  1273. std::array<int64_t, 4> ne = {64, 5, 4, 3},
  1274. float eps = 1e-6f)
  1275. : type(type), ne(ne), eps(eps) {}
  1276. ggml_tensor * build_graph(ggml_context * ctx) override {
  1277. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1278. ggml_set_name(a, "a");
  1279. ggml_tensor * out = ggml_norm(ctx, a, eps);
  1280. ggml_set_name(out, "out");
  1281. return out;
  1282. }
  1283. };
  1284. // GGML_OP_RMS_NORM
  1285. struct test_rms_norm : public test_case {
  1286. const ggml_type type;
  1287. const std::array<int64_t, 4> ne;
  1288. float eps;
  1289. std::string vars() override {
  1290. return VARS_TO_STR3(type, ne, eps);
  1291. }
  1292. test_rms_norm(ggml_type type = GGML_TYPE_F32,
  1293. std::array<int64_t, 4> ne = {64, 5, 4, 3},
  1294. float eps = 1e-6f)
  1295. : type(type), ne(ne), eps(eps) {}
  1296. ggml_tensor * build_graph(ggml_context * ctx) override {
  1297. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1298. ggml_set_param(ctx, a);
  1299. ggml_set_name(a, "a");
  1300. ggml_tensor * out = ggml_rms_norm(ctx, a, eps);
  1301. ggml_set_name(out, "out");
  1302. return out;
  1303. }
  1304. bool grad_precise() override {
  1305. return true;
  1306. }
  1307. };
  1308. // GGML_OP_SSM_CONV
  1309. struct test_ssm_conv : public test_case {
  1310. const ggml_type type;
  1311. const std::array<int64_t, 4> ne_a;
  1312. const std::array<int64_t, 4> ne_b;
  1313. std::string vars() override {
  1314. return VARS_TO_STR3(type, ne_a, ne_b);
  1315. }
  1316. test_ssm_conv(ggml_type type = GGML_TYPE_F32,
  1317. std::array<int64_t, 4> ne_a = {10, 10, 10, 1},
  1318. std::array<int64_t, 4> ne_b = {3, 3, 1, 1})
  1319. : type(type), ne_a(ne_a), ne_b(ne_b) {}
  1320. ggml_tensor * build_graph(ggml_context * ctx) override {
  1321. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  1322. ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
  1323. ggml_tensor * out = ggml_ssm_conv(ctx, a, b);
  1324. return out;
  1325. }
  1326. };
  1327. // GGML_OP_SSM_SCAN
  1328. struct test_ssm_scan : public test_case {
  1329. const ggml_type type;
  1330. const int64_t d_state;
  1331. const int64_t d_inner;
  1332. const int64_t n_seq_tokens;
  1333. const int64_t n_seqs;
  1334. std::string vars() override {
  1335. return VARS_TO_STR5(type, d_state, d_inner, n_seq_tokens, n_seqs);
  1336. }
  1337. test_ssm_scan(ggml_type type = GGML_TYPE_F32,
  1338. int64_t d_state = 32, int64_t d_inner = 32, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
  1339. : type(type), d_state(d_state), d_inner(d_inner), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}
  1340. ggml_tensor * build_graph(ggml_context * ctx) override {
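// Tensor layout follows the Mamba selective scan: s = per-sequence state, x = input, dt = step sizes, A/B/C = state-space parameters.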
  1341. ggml_tensor * s = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner, n_seqs, 1 }.data());
  1342. ggml_tensor * x = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
  1343. ggml_tensor * dt = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
  1344. ggml_tensor * A = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner, 1 , 1 }.data());
  1345. ggml_tensor * B = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
  1346. ggml_tensor * C = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
  1347. ggml_tensor * out = ggml_ssm_scan(ctx, s, x, dt, A, B, C);
  1348. return out;
  1349. }
  1350. };
  1351. // GGML_OP_RWKV_WKV6
  1352. struct test_rwkv_wkv6 : public test_case {
  1353. const ggml_type type;
  1354. const int64_t head_count;
  1355. const int64_t head_size;
  1356. const int64_t n_seq_tokens;
  1357. const int64_t n_seqs;
  1358. std::string vars() override {
  1359. return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs);
  1360. }
  1361. test_rwkv_wkv6(ggml_type type = GGML_TYPE_F32,
  1362. int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
  1363. : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}
  1364. ggml_tensor * build_graph(ggml_context * ctx) override {
  1365. const int64_t n_tokens = n_seq_tokens * n_seqs;
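// r/k/v: per-token receptance, key and value; tf: time-first bonus; td: time-decay; s: recurrent state per sequence.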
  1366. ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1367. ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1368. ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1369. ggml_tensor * tf = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size, head_count }.data());
  1370. ggml_tensor * td = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1371. ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size * head_size * head_count, n_seqs }.data());
  1372. ggml_tensor * out = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, s);
  1373. return out;
  1374. }
  1375. };
  1376. // GGML_OP_GATED_LINEAR_ATTN
  1377. struct test_gla : public test_case {
  1378. const ggml_type type;
  1379. const int64_t head_count;
  1380. const int64_t head_size;
  1381. const int64_t n_seq_tokens;
  1382. const int64_t n_seqs;
  1383. std::string vars() override {
  1384. return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs);
  1385. }
  1386. test_gla(ggml_type type = GGML_TYPE_F32,
  1387. int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
  1388. : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}
  1389. ggml_tensor * build_graph(ggml_context * ctx) override {
  1390. const int64_t n_tokens = n_seq_tokens * n_seqs;
  1391. ggml_tensor * q = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1392. ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1393. ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1394. ggml_tensor * g = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
  1395. ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size * head_size * head_count, n_seqs }.data());
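// Gated linear attention over q/k/v with per-head gates g and recurrent state s; the last argument scales by 1/sqrt(head_size).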
  1396. ggml_tensor * out = ggml_gated_linear_attn(ctx, k, v, q, g, s, pow(head_size, -0.5));
  1397. return out;
  1398. }
  1399. };
  1400. // GGML_OP_MUL_MAT
  1401. struct test_mul_mat : public test_case {
  1402. const ggml_type type_a;
  1403. const ggml_type type_b;
  1404. const int64_t m;
  1405. const int64_t n;
  1406. const int64_t k;
  1407. const std::array<int64_t, 2> bs; // dims 3 and 4
  1408. const std::array<int64_t, 2> nr; // repeat in dims 3 and 4
  1409. const std::array<int64_t, 4> per; // permutation of dimensions
  1410. std::string vars() override {
  1411. return VARS_TO_STR8(type_a, type_b, m, n, k, bs, nr, per);
  1412. }
  1413. double max_nmse_err() override {
  1414. return 5e-4;
  1415. }
  1416. uint64_t op_flops(ggml_tensor * t) override {
  1417. GGML_UNUSED(t);
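// 2 FLOPs (multiply + add) per element of the k-long dot product, for every output element in every batch/repeat (bs*nr).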
  1418. return 2 * m * n * k * bs[0] * nr[0] * bs[1] * nr[1];
  1419. }
  1420. test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
  1421. int64_t m = 32, int64_t n = 32, int64_t k = 32,
  1422. std::array<int64_t, 2> bs = {10, 10},
  1423. std::array<int64_t, 2> nr = {2, 2},
  1424. std::array<int64_t, 4> per = {0, 1, 2, 3})
  1425. : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr), per(per) {}
  1426. ggml_tensor * build_graph(ggml_context * ctx) override {
  1427. // C^T = A * B^T: (k, m) * (k, n) => (m, n)
  1428. ggml_tensor * a;
  1429. ggml_tensor * b;
  1430. const int npermuted = (per[0] != 0) + (per[1] != 1) + (per[2] != 2) + (per[3] != 3);
  1431. if (npermuted > 0) {
  1432. GGML_ASSERT(npermuted == 2);
  1433. GGML_ASSERT(!ggml_is_quantized(type_a) || per[0] == 0);
  1434. GGML_ASSERT(!ggml_is_quantized(type_b) || per[0] == 0);
  1435. // Create tensors with the permuted dimensions, then permute them back to the dimensions given by m,n,k.
  1436. const int64_t ne_a[4] = {k, m, bs[0], bs[1]};
  1437. const int64_t ne_b[4] = {k, n, bs[0]*nr[0], bs[1]*nr[1]};
  1438. a = ggml_new_tensor_4d(ctx, type_a, ne_a[per[0]], ne_a[per[1]], ne_a[per[2]], ne_a[per[3]]);
  1439. b = ggml_new_tensor_4d(ctx, type_b, ne_b[per[0]], ne_b[per[1]], ne_b[per[2]], ne_b[per[3]]);
  1440. ggml_set_param(ctx, a);
  1441. ggml_set_param(ctx, b);
  1442. ggml_set_name(a, "a");
  1443. ggml_set_name(b, "b");
  1444. a = ggml_permute(ctx, a, per[0], per[1], per[2], per[3]);
  1445. b = ggml_permute(ctx, b, per[0], per[1], per[2], per[3]);
  1446. ggml_set_name(a, "a_permuted");
  1447. ggml_set_name(b, "b_permuted");
  1448. } else {
  1449. a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0], bs[1]);
  1450. b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]);
  1451. ggml_set_param(ctx, a);
  1452. ggml_set_param(ctx, b);
  1453. ggml_set_name(a, "a");
  1454. ggml_set_name(b, "b");
  1455. }
  1456. ggml_tensor * out = ggml_mul_mat(ctx, a, b);
  1457. ggml_set_name(out, "out");
  1458. return out;
  1459. }
  1460. };
  1461. // GGML_OP_MUL_MAT_ID
  1462. struct test_mul_mat_id : public test_case {
  1463. const ggml_type type_a;
  1464. const ggml_type type_b;
  1465. const int n_mats;
  1466. const int n_used;
1467. const bool b; // broadcast b matrix
  1468. const int64_t m;
  1469. const int64_t n;
  1470. const int64_t k;
  1471. std::string vars() override {
  1472. return VARS_TO_STR8(type_a, type_b, n_mats, n_used, b, m, n, k);
  1473. }
  1474. double max_nmse_err() override {
  1475. return 5e-4;
  1476. }
  1477. uint64_t op_flops(ggml_tensor * t) override {
  1478. GGML_UNUSED(t);
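// Each of the n rows of b is multiplied with n_used expert matrices of size m x k (2 FLOPs per multiply-add).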
  1479. return 2 * m * k * n * n_used;
  1480. }
  1481. test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
  1482. int n_mats = 8, int n_used = 2, bool b = false,
  1483. int64_t m = 32, int64_t n = 32, int64_t k = 32)
  1484. : type_a(type_a), type_b(type_b), n_mats(n_mats), n_used(n_used), b(b),
  1485. m(m), n(n), k(k) {
  1486. GGML_ASSERT(n_used <= n_mats);
  1487. }
  1488. ggml_tensor * build_graph(ggml_context * ctx) override {
  1489. // C^T = A * B^T: (k, m) * (k, n) => (m, n)
  1490. ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats);
  1491. ggml_set_name(as, "as");
  1492. ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n);
  1493. ggml_set_name(ids, "ids");
  1494. if (n_used != n_mats) {
  1495. ids = ggml_view_2d(ctx, ids, n_used, n, ids->nb[1], 0);
  1496. ggml_set_name(ids, "view_of_ids");
  1497. }
  1498. ggml_tensor * b = ggml_new_tensor_3d(ctx, type_b, k, this->b ? 1 : n_used, n);
  1499. ggml_set_name(b, "b");
  1500. ggml_tensor * out = ggml_mul_mat_id(ctx, as, b, ids);
  1501. ggml_set_name(out, "out");
  1502. return out;
  1503. }
  1504. void initialize_tensors(ggml_context * ctx) override {
  1505. std::random_device rd;
  1506. std::default_random_engine rng(rd());
  1507. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1508. if (t->type == GGML_TYPE_I32) {
  1509. if (ggml_is_view_op(t->op)) { continue; }
  1510. // ids
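// Fill each row with expert indices 0..n_mats-1 and shuffle, so the first n_used entries select distinct experts.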
  1511. for (int64_t r = 0; r < ggml_nrows(t); r++) {
  1512. std::vector<int32_t> data(t->ne[0]);
  1513. for (int i = 0; i < t->ne[0]; i++) {
  1514. data[i] = i % n_mats;
  1515. }
  1516. std::shuffle(data.begin(), data.end(), rng);
  1517. ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(int32_t));
  1518. }
  1519. } else {
  1520. init_tensor_uniform(t);
  1521. }
  1522. }
  1523. }
  1524. };
  1525. // GGML_OP_OUT_PROD
  1526. struct test_out_prod : public test_case {
  1527. const ggml_type type_a;
  1528. const ggml_type type_b;
  1529. const int64_t m;
  1530. const int64_t n;
  1531. const int64_t k;
  1532. const std::array<int64_t, 2> bs; // dims 3 and 4
  1533. const bool trans_b;
  1534. std::string vars() override {
  1535. return VARS_TO_STR7(type_a, type_b, m, n, k, bs, trans_b);
  1536. }
  1537. double max_nmse_err() override {
  1538. return 5e-4;
  1539. }
  1540. test_out_prod(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
  1541. int64_t m = 32, int64_t n = 32, int64_t k = 32,
  1542. std::array<int64_t, 2> bs = {10, 10},
  1543. bool trans_b = false)
  1544. : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), trans_b(trans_b) {}
  1545. ggml_tensor * build_graph(ggml_context * ctx) override {
  1546. ggml_tensor * a = ggml_new_tensor_4d(ctx, type_a, m, k, bs[0], bs[1]);
  1547. ggml_set_name(a, "a");
  1548. ggml_tensor * b;
  1549. if (trans_b) {
  1550. b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0], bs[1]);
  1551. b = ggml_transpose(ctx, b);
  1552. } else {
  1553. b = ggml_new_tensor_4d(ctx, type_b, n, k, bs[0], bs[1]);
  1554. }
  1555. ggml_set_name(b, "b");
  1556. ggml_tensor * out = ggml_out_prod(ctx, a, b);
  1557. ggml_set_name(out, "out");
  1558. return out;
  1559. }
  1560. };
  1561. // GGML_OP_SQR
  1562. struct test_sqr : public test_case {
  1563. const ggml_type type;
  1564. const std::array<int64_t, 4> ne;
  1565. std::string vars() override {
  1566. return VARS_TO_STR2(type, ne);
  1567. }
  1568. test_sqr(ggml_type type = GGML_TYPE_F32,
  1569. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  1570. : type(type), ne(ne) {}
  1571. ggml_tensor * build_graph(ggml_context * ctx) override {
  1572. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1573. ggml_set_param(ctx, a);
  1574. ggml_set_name(a, "a");
  1575. ggml_tensor * out = ggml_sqr(ctx, a);
  1576. ggml_set_name(out, "out");
  1577. return out;
  1578. }
  1579. float grad_eps() override {
  1580. return 0.1f * 0.25f*ne[0]*ne[1]*ne[2]*ne[3]; // 10% of expected value of sum.
  1581. }
  1582. };
  1583. // GGML_OP_SQRT
  1584. struct test_sqrt : public test_case {
  1585. const ggml_type type;
  1586. const std::array<int64_t, 4> ne;
  1587. std::string vars() override {
  1588. return VARS_TO_STR2(type, ne);
  1589. }
  1590. test_sqrt(ggml_type type = GGML_TYPE_F32,
  1591. std::array<int64_t, 4> ne = {10, 3, 3, 2})
  1592. : type(type), ne(ne) {}
  1593. ggml_tensor * build_graph(ggml_context * ctx) override {
  1594. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1595. ggml_set_param(ctx, a);
  1596. ggml_set_name(a, "a");
  1597. ggml_tensor * out = ggml_sqrt(ctx, a);
  1598. ggml_set_name(out, "out");
  1599. return out;
  1600. }
  1601. void initialize_tensors(ggml_context * ctx) override {
  1602. // fill with positive values
  1603. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1604. init_tensor_uniform(t, 50.0f, 100.0f);
  1605. }
  1606. }
  1607. float grad_eps() override {
  1608. return 20.0f;
  1609. }
  1610. bool grad_precise() override {
  1611. return true;
  1612. }
  1613. };
  1614. // GGML_OP_LOG
  1615. struct test_log : public test_case {
  1616. const ggml_type type;
  1617. const std::array<int64_t, 4> ne;
  1618. std::string vars() override {
  1619. return VARS_TO_STR2(type, ne);
  1620. }
  1621. test_log(ggml_type type = GGML_TYPE_F32,
  1622. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  1623. : type(type), ne(ne) {}
  1624. ggml_tensor * build_graph(ggml_context * ctx) override {
  1625. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1626. ggml_set_param(ctx, a);
  1627. ggml_set_name(a, "a");
  1628. ggml_tensor * out = ggml_log(ctx, a);
  1629. ggml_set_name(out, "out");
  1630. return out;
  1631. }
  1632. void initialize_tensors(ggml_context * ctx) override {
  1633. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1634. // log(1) == 0, cluster values there to keep the sum low for better precision in the backward pass:
  1635. init_tensor_uniform(t, 0.9f, 1.1f);
  1636. }
  1637. }
  1638. bool grad_precise() override {
  1639. return true;
  1640. }
  1641. };
  1642. // GGML_OP_SIN
  1643. struct test_sin : public test_case {
  1644. const ggml_type type;
  1645. const std::array<int64_t, 4> ne;
  1646. std::string vars() override {
  1647. return VARS_TO_STR2(type, ne);
  1648. }
  1649. test_sin(ggml_type type = GGML_TYPE_F32,
  1650. std::array<int64_t, 4> ne = {10, 2, 2, 2})
  1651. : type(type), ne(ne) {}
  1652. ggml_tensor * build_graph(ggml_context * ctx) override {
  1653. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1654. ggml_set_param(ctx, a);
  1655. ggml_set_name(a, "a");
  1656. ggml_tensor * out = ggml_sin(ctx, a);
  1657. ggml_set_name(out, "out");
  1658. return out;
  1659. }
  1660. void initialize_tensors(ggml_context * ctx) override {
  1661. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1662. init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi].
  1663. }
  1664. }
  1665. double max_maa_err() override {
  1666. return 1e-3;
  1667. }
  1668. float grad_eps() override {
  1669. return 0.2f;
  1670. }
  1671. bool grad_precise() override {
  1672. return true;
  1673. }
  1674. };
  1675. // GGML_OP_COS
  1676. struct test_cos : public test_case {
  1677. const ggml_type type;
  1678. const std::array<int64_t, 4> ne;
  1679. std::string vars() override {
  1680. return VARS_TO_STR2(type, ne);
  1681. }
  1682. test_cos(ggml_type type = GGML_TYPE_F32,
  1683. std::array<int64_t, 4> ne = {10, 2, 2, 2})
  1684. : type(type), ne(ne) {}
  1685. ggml_tensor * build_graph(ggml_context * ctx) override {
  1686. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1687. ggml_set_param(ctx, a);
  1688. ggml_set_name(a, "a");
  1689. ggml_tensor * out = ggml_cos(ctx, a);
  1690. ggml_set_name(out, "out");
  1691. return out;
  1692. }
  1693. void initialize_tensors(ggml_context * ctx) override {
  1694. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1695. init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi].
  1696. }
  1697. }
  1698. double max_maa_err() override {
  1699. return 1e-3;
  1700. }
  1701. float grad_eps() override {
  1702. return 0.2f;
  1703. }
  1704. bool grad_precise() override {
  1705. return true;
  1706. }
  1707. };
  1708. // GGML_OP_CLAMP
  1709. struct test_clamp : public test_case {
  1710. const ggml_type type;
  1711. const std::array<int64_t, 4> ne;
  1712. float min;
  1713. float max;
  1714. std::string vars() override {
  1715. return VARS_TO_STR4(type, ne, min, max);
  1716. }
  1717. test_clamp(ggml_type type = GGML_TYPE_F32,
  1718. std::array<int64_t, 4> ne = {10, 5, 4, 3},
  1719. float min = -0.5f, float max = 0.5f)
  1720. : type(type), ne(ne), min(min), max(max) {}
  1721. ggml_tensor * build_graph(ggml_context * ctx) override {
  1722. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1723. ggml_set_name(a, "a");
  1724. ggml_tensor * out = ggml_clamp(ctx, a, min, max);
  1725. ggml_set_name(out, "out");
  1726. return out;
  1727. }
  1728. float grad_eps() override {
  1729. return 1e-2f;
  1730. }
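// The analytical gradient of clamp is 0 outside [min, max] and 1 inside.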
  1731. std::vector<float> grad_expect() override {
  1732. return {0.0f, 1.0f};
  1733. }
  1734. };
  1735. // GGML_OP_DIAG_MASK_INF
  1736. struct test_diag_mask_inf : public test_case {
  1737. const ggml_type type;
  1738. const std::array<int64_t, 4> ne;
  1739. const int n_past;
  1740. std::string vars() override {
  1741. return VARS_TO_STR3(type, ne, n_past);
  1742. }
  1743. test_diag_mask_inf(ggml_type type = GGML_TYPE_F32,
  1744. std::array<int64_t, 4> ne = {10, 10, 3, 2},
  1745. int n_past = 5)
  1746. : type(type), ne(ne), n_past(n_past) {}
  1747. ggml_tensor * build_graph(ggml_context * ctx) override {
  1748. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1749. ggml_set_param(ctx, a);
  1750. ggml_set_name(a, "a");
  1751. ggml_tensor * out = ggml_diag_mask_inf(ctx, a, n_past);
  1752. ggml_set_name(out, "out");
  1753. return out;
  1754. }
  1755. };
  1756. // GGML_OP_SOFT_MAX
  1757. struct test_soft_max : public test_case {
  1758. const ggml_type type;
  1759. const std::array<int64_t, 4> ne;
  1760. const bool mask;
  1761. const float scale;
  1762. const float max_bias;
  1763. std::string vars() override {
  1764. return VARS_TO_STR5(type, ne, mask, scale, max_bias);
  1765. }
  1766. // the 1024 test with bias occasionally fails:
  1767. // SOFT_MAX(type=f32,ne=[1024,16,1,1],mask=1,scale=1.000000,max_bias=8.000000): [SOFT_MAX] NMSE = 0.000000103 > 0.000000100 FAIL
  1768. virtual double max_nmse_err() override {
  1769. return 1e-6;
  1770. }
  1771. test_soft_max(ggml_type type = GGML_TYPE_F32,
  1772. std::array<int64_t, 4> ne = {10, 5, 4, 3},
  1773. bool mask = false,
  1774. float scale = 1.0f,
  1775. float max_bias = 0.0f)
  1776. : type(type), ne(ne), mask(mask), scale(scale), max_bias(max_bias) {}
  1777. ggml_tensor * build_graph(ggml_context * ctx) override {
  1778. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  1779. ggml_set_param(ctx, a);
  1780. ggml_set_name(a, "a");
  1781. ggml_tensor * mask = nullptr;
  1782. if (this->mask) {
  1783. mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]);
  1784. ggml_set_name(mask, "mask");
  1785. }
  1786. ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
  1787. ggml_set_name(out, "out");
  1788. return out;
  1789. }
  1790. bool grad_precise() override {
  1791. return true;
  1792. }
  1793. };
  1794. // GGML_OP_ROPE + GGML_OP_ROPE_BACK
  1795. struct test_rope : public test_case {
  1796. const ggml_type type;
  1797. const std::array<int64_t, 4> ne_a;
  1798. int n_dims;
  1799. int mode;
  1800. int n_ctx; // used to generate positions
  1801. float fs; // freq_scale
  1802. float ef; // ext_factor
  1803. float af; // attn_factor
1804. bool ff; // use freq_factors
  1805. int v; // view (1 : non-contiguous a)
  1806. bool forward;
  1807. std::string vars() override {
  1808. // forward can be inferred from the op, does not need to be printed
  1809. return VARS_TO_STR10(type, ne_a, n_dims, mode, n_ctx, fs, ef, af, ff, v);
  1810. }
  1811. test_rope(ggml_type type = GGML_TYPE_F32,
  1812. std::array<int64_t, 4> ne_a = {10, 5, 3, 1},
  1813. int n_dims = 10, int mode = 0, int n_ctx = 512, float fs = 1.0f,
  1814. float ef = 0.0f, float af = 0.0f, bool ff = false, int v = 0, bool forward = true)
  1815. : type(type), ne_a(ne_a), n_dims(n_dims), mode(mode), n_ctx(n_ctx), fs(fs), ef(ef), af(af), ff(ff), v(v), forward(forward) {}
  1816. ggml_tensor * build_graph(ggml_context * ctx) override {
  1817. ggml_tensor * a;
  1818. if (v & 1) {
  1819. auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
  1820. a = ggml_new_tensor(ctx, type, 4, ne.data());
  1821. if (forward) {
  1822. ggml_set_param(ctx, a);
  1823. }
  1824. ggml_set_name(a, "a");
  1825. a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
  1826. ggml_set_name(a, "view_of_a");
  1827. } else {
  1828. a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  1829. if (forward) {
  1830. ggml_set_param(ctx, a);
  1831. }
  1832. ggml_set_name(a, "a");
  1833. }
  1834. const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE;
  1835. const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
  1836. ggml_tensor * pos;
  1837. if (is_mrope || is_vision) {
  1838. pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2] * 4);
  1839. } else {
  1840. pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2]);
  1841. }
  1842. ggml_set_name(pos, "pos");
  1843. ggml_tensor * freq = nullptr;
  1844. if (ff) {
  1845. freq = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_dims/2);
  1846. ggml_set_name(freq, "freq");
  1847. }
  1848. ggml_tensor * out;
  1849. if (is_mrope) {
  1850. if (is_vision) {
  1851. GGML_ASSERT(n_dims/4 > 0);
1852. int rope_sections[4] = {n_dims/4, n_dims/4, 0, 0}; // Vision-RoPE only uses the first two sections, for the image (x, y) coordinates
  1853. if (forward) {
  1854. out = ggml_rope_multi (ctx, a, pos, freq, n_dims/2, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
  1855. } else {
  1856. out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims/2, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
  1857. }
  1858. } else {
  1859. GGML_ASSERT(n_dims/3 > 0);
  1860. int rope_sections[4] = {n_dims/3, n_dims/3, n_dims/3, 0};
  1861. if (forward) {
  1862. out = ggml_rope_multi (ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
  1863. } else {
  1864. out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
  1865. }
  1866. }
  1867. } else {
  1868. if (forward) {
  1869. out = ggml_rope_ext (ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
  1870. } else {
  1871. out = ggml_rope_ext_back(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
  1872. }
  1873. }
  1874. ggml_set_name(out, "out");
  1875. return out;
  1876. }
  1877. void initialize_tensors(ggml_context * ctx) override {
  1878. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  1879. if (t->type == GGML_TYPE_I32) {
  1880. // pos
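// M-RoPE uses 4 position values per token (one per section); regular RoPE uses 1.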
  1881. const int num_pos_ids = (mode & GGML_ROPE_TYPE_MROPE) ? ne_a[2] * 4 : ne_a[2];
  1882. std::vector<int> data(num_pos_ids);
  1883. for (int i = 0; i < num_pos_ids; i++) {
  1884. data[i] = rand() % n_ctx;
  1885. }
  1886. ggml_backend_tensor_set(t, data.data(), 0, num_pos_ids * sizeof(int));
  1887. } else {
  1888. if (t->ne[0] == n_dims/2) {
  1889. // frequency factors in the range [0.9f, 1.1f]
  1890. init_tensor_uniform(t, 0.9f, 1.1f);
  1891. } else {
  1892. init_tensor_uniform(t);
  1893. }
  1894. }
  1895. }
  1896. }
  1897. double max_maa_err() override {
  1898. return 1e-3;
  1899. }
  1900. bool grad_precise() override {
  1901. return true;
  1902. }
  1903. };
  1904. // GGML_OP_POOL2D
  1905. struct test_pool2d : public test_case {
  1906. enum ggml_op_pool pool_type;
  1907. const ggml_type type_input;
  1908. const std::array<int64_t, 4> ne_input;
  1909. // kernel size
  1910. const int k0;
  1911. const int k1;
  1912. // stride
  1913. const int s0;
  1914. const int s1;
  1915. // padding
  1916. const int p0;
  1917. const int p1;
  1918. std::string vars() override {
  1919. return VARS_TO_STR9(pool_type, type_input, ne_input, k0, k1, s0, s1, p0, p1);
  1920. }
  1921. test_pool2d(ggml_op_pool pool_type = GGML_OP_POOL_AVG,
  1922. ggml_type type_input = GGML_TYPE_F32,
  1923. std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
  1924. int k0 = 3, int k1 = 3,
  1925. int s0 = 1, int s1 = 1,
  1926. int p0 = 1, int p1 = 1)
  1927. : pool_type(pool_type), type_input(type_input), ne_input(ne_input), k0(k0), k1(k1), s0(s0), s1(s1), p0(p0), p1(p1) {}
  1928. ggml_tensor * build_graph(ggml_context * ctx) override {
  1929. ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
  1930. ggml_set_param(ctx, input);
  1931. ggml_set_name(input, "input");
  1932. ggml_tensor * out = ggml_pool_2d(ctx, input, pool_type, k0, k1, s0, s1, p0, p1);
  1933. ggml_set_name(out, "out");
  1934. return out;
  1935. }
  1936. };
  1937. // GGML_OP_CONV_TRANSPOSE_1D
  1938. struct test_conv_transpose_1d : public test_case {
  1939. const std::array<int64_t, 4> ne_input;
  1940. const std::array<int64_t, 4> ne_kernel;
  1941. const int s0; // stride
  1942. const int p0; // padding
  1943. const int d0; // dilation
  1944. std::string vars() override {
  1945. return VARS_TO_STR5(ne_input, ne_kernel, s0, p0, d0);
  1946. }
  1947. test_conv_transpose_1d(std::array<int64_t, 4> ne_input = {197, 32, 1, 1}, // [input_width, input_height, input_channels, 1]
  1948. std::array<int64_t, 4> ne_kernel = {16, 32, 32, 1}, // [kernel_width, kernel_height, input_channels, 1]
  1949. int s0 = 1, int p0 = 0, int d0 = 1)
  1950. : ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), p0(p0), d0(d0) {}
  1951. ggml_tensor * build_graph(ggml_context * ctx) override {
  1952. ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data());
  1953. ggml_set_name(input, "input");
  1954. ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data());
  1955. ggml_set_name(kernel, "kernel");
  1956. ggml_tensor * out = ggml_conv_transpose_1d(ctx, kernel, input, s0, p0, d0);
  1957. ggml_set_name(out, "out");
  1958. return out;
  1959. }
  1960. };
  1961. // GGML_OP_IM2COL
  1962. struct test_im2col : public test_case {
  1963. const ggml_type type_input;
  1964. const ggml_type type_kernel;
  1965. const ggml_type dst_type;
  1966. const std::array<int64_t, 4> ne_input;
  1967. const std::array<int64_t, 4> ne_kernel;
  1968. // stride
  1969. const int s0;
  1970. const int s1;
  1971. // padding
  1972. const int p0;
  1973. const int p1;
  1974. // dilation
  1975. const int d0;
  1976. const int d1;
  1977. // mode
  1978. const bool is_2D;
  1979. std::string vars() override {
  1980. return VARS_TO_STR12(type_input, type_kernel, dst_type, ne_input, ne_kernel, s0, s1, p0, p1, d0, d1, is_2D);
  1981. }
  1982. test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16, ggml_type dst_type = GGML_TYPE_F32,
  1983. std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
  1984. std::array<int64_t, 4> ne_kernel = {3, 3, 3, 1}, // [kernel_width, kernel_height, input_channels, 1]
  1985. int s0 = 1, int s1 = 1,
  1986. int p0 = 1, int p1 = 1,
  1987. int d0 = 1, int d1 = 1,
  1988. bool is_2D = true)
  1989. : type_input(type_input), type_kernel(type_kernel), dst_type(dst_type), ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), is_2D(is_2D) {}
  1990. ggml_tensor * build_graph(ggml_context * ctx) override {
  1991. ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
  1992. ggml_set_param(ctx, input);
  1993. ggml_set_name(input, "input");
  1994. ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data());
  1995. ggml_set_name(kernel, "kernel");
  1996. ggml_tensor * out = ggml_im2col(ctx, kernel, input, s0, s1, p0, p1, d0, d1, is_2D, dst_type);
  1997. ggml_set_name(out, "out");
  1998. return out;
  1999. }
  2000. };
  2001. // GGML_OP_CONCAT
  2002. struct test_concat : public test_case {
  2003. const ggml_type type;
  2004. const std::array<int64_t, 4> ne_a;
  2005. const int64_t ne_b_d;
  2006. const int dim;
  2007. const int v; // view (1 << 0: non-cont a, 1 << 1: non-cont b)
  2008. std::string vars() override {
  2009. return VARS_TO_STR5(type, ne_a, ne_b_d, dim, v);
  2010. }
  2011. test_concat(ggml_type type = GGML_TYPE_F32,
  2012. std::array<int64_t, 4> ne_a = {10, 5, 5, 5},
  2013. int64_t ne_b_d = 5,
  2014. int dim = 2, int v = 0)
  2015. : type(type), ne_a(ne_a), ne_b_d(ne_b_d), dim(dim), v(v) {}
  2016. ggml_tensor * build_graph(ggml_context * ctx) override {
  2017. auto ne_b = ne_a;
  2018. ne_b[dim] = ne_b_d;
  2019. ggml_tensor * a;
  2020. if (v & 1) {
  2021. auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
  2022. a = ggml_new_tensor(ctx, type, 4, ne.data());
  2023. ggml_set_name(a, "a");
  2024. a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
  2025. ggml_set_name(a, "view_of_a");
  2026. } else {
  2027. a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  2028. ggml_set_name(a, "a");
  2029. }
  2030. ggml_tensor * b;
  2031. if (v & 2) {
  2032. auto ne = ne_b; ne[0] *= 3; ne[1] *= 2; ne[2] *= 4;
  2033. b = ggml_new_tensor(ctx, type, 4, ne.data());
  2034. ggml_set_name(b, "b");
  2035. b = ggml_view_4d(ctx, b, ne_b[0], ne_b[1], ne_b[2], ne_b[3], b->nb[1], b->nb[2], b->nb[3], 0);
  2036. ggml_set_name(b, "view_of_b");
  2037. } else {
  2038. b = ggml_new_tensor(ctx, type, 4, ne_b.data());
  2039. ggml_set_name(b, "b");
  2040. }
  2041. ggml_tensor * out = ggml_concat(ctx, a, b, dim);
  2042. ggml_set_name(out, "out");
  2043. return out;
  2044. }
  2045. };
  2046. // GGML_OP_ARGSORT
  2047. struct test_argsort : public test_case {
  2048. const ggml_type type;
  2049. const std::array<int64_t, 4> ne;
  2050. ggml_sort_order order;
  2051. std::string vars() override {
  2052. return VARS_TO_STR3(type, ne, order);
  2053. }
  2054. test_argsort(ggml_type type = GGML_TYPE_F32,
  2055. std::array<int64_t, 4> ne = {16, 10, 10, 10},
  2056. ggml_sort_order order = GGML_SORT_ORDER_ASC)
  2057. : type(type), ne(ne), order(order) {}
  2058. ggml_tensor * build_graph(ggml_context * ctx) override {
  2059. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  2060. ggml_set_name(a, "a");
  2061. ggml_tensor * out = ggml_argsort(ctx, a, order);
  2062. ggml_set_name(out, "out");
  2063. return out;
  2064. }
  2065. void initialize_tensors(ggml_context * ctx) override {
  2066. std::random_device rd;
  2067. std::default_random_engine rng(rd());
  2068. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  2069. if (t->type == GGML_TYPE_I32) {
  2070. // indices
  2071. std::vector<int> data(ggml_nelements(t));
  2072. for (int i = 0; i < ggml_nelements(t); i++) {
  2073. data[i] = rand();
  2074. }
  2075. std::shuffle(data.begin(), data.end(), rng);
  2076. ggml_backend_tensor_set(t, data.data(), 0, ne[0]*ne[1]*ne[2]*ne[3] * sizeof(int));
  2077. } else if (t->type == GGML_TYPE_F32) {
  2078. // initialize with unique values to avoid ties
  2079. for (int64_t r = 0; r < ggml_nrows(t); r++) {
  2080. std::vector<float> data(t->ne[0]);
  2081. for (int i = 0; i < t->ne[0]; i++) {
  2082. data[i] = i;
  2083. }
  2084. std::shuffle(data.begin(), data.end(), rng);
  2085. ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
  2086. }
  2087. } else {
  2088. GGML_ABORT("fatal error");
  2089. }
  2090. }
  2091. }
  2092. };
  2093. // GGML_OP_SUM
  2094. struct test_sum : public test_case {
  2095. const ggml_type type;
  2096. const std::array<int64_t, 4> ne;
  2097. std::string vars() override {
  2098. return VARS_TO_STR2(type, ne);
  2099. }
  2100. test_sum(ggml_type type = GGML_TYPE_F32,
  2101. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  2102. : type(type), ne(ne) {}
  2103. ggml_tensor * build_graph(ggml_context * ctx) override {
  2104. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  2105. ggml_set_param(ctx, a);
  2106. ggml_set_name(a, "a");
  2107. ggml_tensor * out = ggml_sum(ctx, a);
  2108. ggml_set_name(out, "out");
  2109. return out;
  2110. }
  2111. float grad_eps() override {
  2112. return 0.1f * sqrtf(ne[0]*ne[1]*ne[2]*ne[3]);
  2113. }
  2114. };
  2115. // GGML_OP_SUM_ROWS
  2116. struct test_sum_rows : public test_case {
  2117. const ggml_type type;
  2118. const std::array<int64_t, 4> ne;
  2119. std::string vars() override {
  2120. return VARS_TO_STR2(type, ne);
  2121. }
  2122. test_sum_rows(ggml_type type = GGML_TYPE_F32,
  2123. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  2124. : type(type), ne(ne) {}
  2125. ggml_tensor * build_graph(ggml_context * ctx) override {
  2126. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  2127. ggml_set_param(ctx, a);
  2128. ggml_set_name(a, "a");
  2129. ggml_tensor * out = ggml_sum_rows(ctx, a);
  2130. ggml_set_name(out, "out");
  2131. return out;
  2132. }
  2133. };
  2134. // GGML_OP_MEAN
  2135. struct test_mean : public test_case {
  2136. const ggml_type type;
  2137. const std::array<int64_t, 4> ne;
  2138. std::string vars() override {
  2139. return VARS_TO_STR2(type, ne);
  2140. }
  2141. test_mean(ggml_type type = GGML_TYPE_F32,
  2142. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  2143. : type(type), ne(ne) {}
  2144. ggml_tensor * build_graph(ggml_context * ctx) override {
  2145. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  2146. ggml_set_param(ctx, a);
  2147. ggml_set_name(a, "a");
  2148. ggml_tensor * out = ggml_mean(ctx, a);
  2149. ggml_set_name(out, "out");
  2150. return out;
  2151. }
  2152. float grad_eps() override {
  2153. return 0.1f * ne[0]*ne[1]*ne[2]*ne[3];
  2154. }
  2155. };
  2156. // GGML_OP_UPSCALE
  2157. struct test_upscale : public test_case {
  2158. const ggml_type type;
  2159. const std::array<int64_t, 4> ne;
  2160. const int32_t scale_factor;
  2161. const bool transpose;
  2162. std::string vars() override {
  2163. return VARS_TO_STR4(type, ne, scale_factor, transpose);
  2164. }
  2165. test_upscale(ggml_type type = GGML_TYPE_F32,
  2166. std::array<int64_t, 4> ne = {512, 512, 3, 1},
  2167. int32_t scale_factor = 2, bool transpose = false)
  2168. : type(type), ne(ne), scale_factor(scale_factor), transpose(transpose) {}
  2169. ggml_tensor * build_graph(ggml_context * ctx) override {
  2170. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  2171. ggml_set_name(a, "a");
  2172. if (transpose) {
  2173. a = ggml_transpose(ctx, a);
  2174. ggml_set_name(a, "a_transposed");
  2175. }
  2176. ggml_tensor * out = ggml_upscale(ctx, a, scale_factor);
  2177. ggml_set_name(out, "out");
  2178. return out;
  2179. }
  2180. };
  2181. // GGML_OP_UPSCALE (ext)
  2182. struct test_upscale_ext : public test_case {
  2183. const ggml_type type;
  2184. const std::array<int64_t, 4> ne;
  2185. const std::array<int64_t, 4> ne_tgt;
  2186. std::string vars() override {
  2187. return VARS_TO_STR3(type, ne, ne_tgt);
  2188. }
  2189. test_upscale_ext(ggml_type type = GGML_TYPE_F32,
  2190. std::array<int64_t, 4> ne = {2, 5, 7, 11},
  2191. std::array<int64_t, 4> ne_tgt = {5, 7, 11, 13})
  2192. : type(type), ne(ne), ne_tgt(ne_tgt) {}
  2193. ggml_tensor * build_graph(ggml_context * ctx) override {
  2194. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  2195. ggml_set_name(a, "a");
2196. ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1], ne_tgt[2], ne_tgt[3]);
  2197. ggml_set_name(out, "out");
  2198. return out;
  2199. }
  2200. };
  2201. // GGML_OP_GROUP_NORM
  2202. struct test_group_norm : public test_case {
  2203. const ggml_type type;
  2204. const std::array<int64_t, 4> ne;
  2205. const int32_t num_groups;
  2206. const float eps;
  2207. std::string vars() override {
  2208. return VARS_TO_STR3(type, ne, num_groups);
  2209. }
  2210. test_group_norm(ggml_type type = GGML_TYPE_F32,
  2211. std::array<int64_t, 4> ne = {64, 64, 320, 1},
  2212. int32_t num_groups = 32,
  2213. float eps = 1e-6f)
  2214. : type(type), ne(ne), num_groups(num_groups), eps(eps) {}
  2215. ggml_tensor * build_graph(ggml_context * ctx) override {
  2216. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
  2217. ggml_set_name(a, "a");
  2218. ggml_tensor * out = ggml_group_norm(ctx, a, num_groups, eps);
  2219. ggml_set_name(out, "out");
  2220. return out;
  2221. }
  2222. };
  2223. // GGML_OP_ACC
  2224. struct test_acc : public test_case {
  2225. const ggml_type type;
  2226. const std::array<int64_t, 4> ne_a;
  2227. const std::array<int64_t, 4> ne_b;
  2228. std::string vars() override {
  2229. return VARS_TO_STR3(type, ne_a, ne_b);
  2230. }
  2231. test_acc(ggml_type type = GGML_TYPE_F32,
  2232. std::array<int64_t, 4> ne_a = {256, 17, 1, 1},
  2233. std::array<int64_t, 4> ne_b = {256, 16, 1, 1})
  2234. : type(type), ne_a(ne_a), ne_b(ne_b) {}
  2235. ggml_tensor * build_graph(ggml_context * ctx) override {
  2236. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  2237. ggml_set_param(ctx, a);
  2238. ggml_set_name(a, "a");
  2239. ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
  2240. ggml_set_param(ctx, b);
  2241. ggml_set_name(b, "b");
  2242. ggml_tensor * out = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], b->nb[1]);
  2243. ggml_set_name(out, "out");
  2244. return out;
  2245. }
  2246. };
  2247. // GGML_OP_PAD
  2248. struct test_pad : public test_case {
  2249. const ggml_type type;
  2250. const std::array<int64_t, 4> ne_a;
  2251. const int pad_0;
  2252. const int pad_1;
  2253. std::string vars() override {
  2254. return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
  2255. }
  2256. test_pad(ggml_type type = GGML_TYPE_F32,
  2257. std::array<int64_t, 4> ne_a = {512, 512, 1, 1},
  2258. int pad_0 = 1, int pad_1 = 1)
  2259. : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}
  2260. ggml_tensor * build_graph(ggml_context * ctx) override {
  2261. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  2262. ggml_set_name(a, "a");
  2263. ggml_tensor * out = ggml_pad(ctx, a, pad_0, pad_1, 0, 0);
  2264. ggml_set_name(out, "out");
  2265. return out;
  2266. }
  2267. };
  2268. // GGML_OP_PAD_REFLECT_1D
  2269. struct test_pad_reflect_1d : public test_case {
  2270. const ggml_type type;
  2271. const std::array<int64_t, 4> ne_a;
  2272. const int pad_0;
  2273. const int pad_1;
  2274. std::string vars() override {
  2275. return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
  2276. }
  2277. test_pad_reflect_1d(ggml_type type = GGML_TYPE_F32,
  2278. std::array<int64_t, 4> ne_a = {512, 34, 2, 1},
  2279. int pad_0 = 10, int pad_1 = 9)
  2280. : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}
  2281. ggml_tensor * build_graph(ggml_context * ctx) override {
  2282. ggml_tensor * a = ggml_new_tensor(ctx, type, 2, ne_a.data());
  2283. ggml_set_name(a, "a");
  2284. ggml_tensor * out = ggml_pad_reflect_1d(ctx, a, pad_0, pad_1);
  2285. ggml_set_name(out, "out");
  2286. return out;
  2287. }
  2288. };
  2289. // GGML_OP_ARANGE
  2290. struct test_arange : public test_case {
  2291. const ggml_type type;
  2292. const float start;
  2293. const float stop;
  2294. const float step;
  2295. std::string vars() override {
  2296. return VARS_TO_STR4(type, start, stop, step);
  2297. }
  2298. test_arange(ggml_type type = GGML_TYPE_F32,
  2299. float start = 0.f, float stop = 10.f, float step = 1.f)
  2300. : type(type), start(start), stop(stop), step(step) {}
  2301. ggml_tensor * build_graph(ggml_context * ctx) override {
  2302. ggml_tensor * out = ggml_arange(ctx, start, stop, step);
  2303. ggml_set_name(out, "out");
  2304. return out;
  2305. }
  2306. };
  2307. // GGML_OP_TIMESTEP_EMBEDDING
  2308. struct test_timestep_embedding : public test_case {
  2309. const ggml_type type;
  2310. const std::array<int64_t, 4> ne_a;
  2311. const int dim;
  2312. const int max_period;
  2313. std::string vars() override {
  2314. return VARS_TO_STR4(type, ne_a, dim, max_period);
  2315. }
  2316. test_timestep_embedding(ggml_type type = GGML_TYPE_F32,
  2317. std::array<int64_t, 4> ne_a = {2, 1, 1, 1},
  2318. int dim = 320, int max_period=10000)
  2319. : type(type), ne_a(ne_a), dim(dim), max_period(max_period) {}
  2320. ggml_tensor * build_graph(ggml_context * ctx) override {
  2321. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  2322. ggml_set_name(a, "a");
  2323. ggml_tensor * out = ggml_timestep_embedding(ctx, a, dim, max_period);
  2324. ggml_set_name(out, "out");
  2325. return out;
  2326. }
  2327. };
  2328. // GGML_OP_LEAKY_RELU
  2329. struct test_leaky_relu : public test_case {
  2330. const ggml_type type;
  2331. const std::array<int64_t, 4> ne_a;
  2332. const float negative_slope;
  2333. std::string vars() override {
  2334. return VARS_TO_STR3(type, ne_a, negative_slope);
  2335. }
  2336. test_leaky_relu(ggml_type type = GGML_TYPE_F32,
  2337. std::array<int64_t, 4> ne_a = {10, 5, 4, 3},
  2338. float negative_slope = 0.1f)
  2339. : type(type), ne_a(ne_a), negative_slope(negative_slope) {}
  2340. ggml_tensor * build_graph(ggml_context * ctx) override {
  2341. ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
  2342. ggml_set_name(a, "a");
  2343. ggml_tensor * out = ggml_leaky_relu(ctx, a, negative_slope, true);
  2344. ggml_set_name(out, "out");
  2345. return out;
  2346. }
  2347. };
  2348. // GGML_OP_FLASH_ATTN_EXT
  2349. struct test_flash_attn_ext : public test_case {
  2350. const int64_t hs; // head size
  2351. const int64_t nh; // num heads
  2352. const int64_t kv; // kv size
  2353. const int64_t nb; // batch size
  2354. const bool mask; // use mask
  2355. const float max_bias; // ALiBi
  2356. const float logit_softcap; // Gemma 2
  2357. const ggml_type type_KV;
  2358. std::string vars() override {
  2359. return VARS_TO_STR8(hs, nh, kv, nb, mask, max_bias, logit_softcap, type_KV);
  2360. }
  2361. double max_nmse_err() override {
  2362. return 5e-4;
  2363. }
  2364. uint64_t op_flops(ggml_tensor * t) override {
  2365. GGML_UNUSED(t);
  2366. // Just counting matmul costs:
  2367. // Q*K^T is nb x hs x kv, P*V is nb x kv x hs, per head
  2368. return 2 * 2 * nh * nb * hs * kv;
  2369. }
  2370. test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8,
  2371. bool mask = true, float max_bias = 0.0f, float logit_softcap = 0.0f, ggml_type type_KV = GGML_TYPE_F16)
  2372. : hs(hs), nh(nh), kv(kv), nb(nb), mask(mask), max_bias(max_bias), logit_softcap(logit_softcap), type_KV(type_KV) {}
  2373. ggml_tensor * build_graph(ggml_context * ctx) override {
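// Pad the head size to the block size of the KV type so quantized K/V rows consist of whole blocks.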
  2374. const int64_t hs_padded = GGML_PAD(hs, ggml_blck_size(type_KV));
  2375. ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs_padded, nb, nh, 1);
  2376. ggml_set_name(q, "q");
  2377. ggml_tensor * k = ggml_new_tensor_4d(ctx, type_KV, hs_padded, kv, nh, 1);
  2378. ggml_set_name(k, "k");
  2379. ggml_tensor * v = ggml_new_tensor_4d(ctx, type_KV, hs_padded, kv, nh, 1);
  2380. ggml_set_name(v, "v");
  2381. ggml_tensor * m = nullptr;
  2382. if (mask) {
  2383. m = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1);
  2384. ggml_set_name(m, "m");
  2385. }
  2386. ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias, logit_softcap);
  2387. ggml_set_name(out, "out");
  2388. return out;
  2389. }
  2390. bool grad_precise() override {
  2391. return true;
  2392. }
  2393. };
  2394. // GGML_OP_CROSS_ENTROPY_LOSS
  2395. struct test_cross_entropy_loss : public test_case {
  2396. const ggml_type type;
  2397. const std::array<int64_t, 4> ne;
  2398. std::string vars() override {
  2399. return VARS_TO_STR2(type, ne);
  2400. }
  2401. test_cross_entropy_loss(ggml_type type = GGML_TYPE_F32,
  2402. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  2403. : type(type), ne(ne) {}
  2404. ggml_tensor * build_graph(ggml_context * ctx) override {
  2405. ggml_tensor * logits = ggml_new_tensor(ctx, type, 4, ne.data());
  2406. ggml_set_param(ctx, logits);
  2407. ggml_set_name(logits, "logits");
  2408. ggml_tensor * labels = ggml_new_tensor(ctx, type, 4, ne.data());
  2409. // The labels are assumed to be constant -> no gradients.
  2410. ggml_set_name(labels, "labels");
  2411. // Ensure labels add up to 1:
  2412. labels = ggml_soft_max(ctx, labels);
  2413. ggml_set_name(labels, "labels_normalized");
  2414. ggml_tensor * out = ggml_cross_entropy_loss(ctx, logits, labels);
  2415. ggml_set_name(out, "out");
  2416. return out;
  2417. }
  2418. void initialize_tensors(ggml_context * ctx) override {
2419. // For larger absolute differences between the logits the softmax is more linear, and therefore the numerical gradients are more precise.
  2420. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  2421. init_tensor_uniform(t, -100.0f, 100.0f);
  2422. }
  2423. }
  2424. float grad_eps() override {
  2425. return 1.0f;
  2426. }
  2427. bool grad_precise() override {
  2428. return true;
  2429. }
  2430. };
  2431. // GGML_OP_OPT_STEP_ADAMW
  2432. struct test_opt_step_adamw : public test_case {
  2433. const ggml_type type;
  2434. const std::array<int64_t, 4> ne;
  2435. std::string vars() override {
  2436. return VARS_TO_STR2(type, ne);
  2437. }
  2438. test_opt_step_adamw(ggml_type type = GGML_TYPE_F32,
  2439. std::array<int64_t, 4> ne = {10, 5, 4, 3})
  2440. : type(type), ne(ne) {}
  2441. ggml_tensor * build_graph(ggml_context * ctx) override {
  2442. ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
2443. ggml_set_param(ctx, a); // Despite tensor a having gradients, the output tensor will not have them.
  2444. ggml_set_name(a, "a");
  2445. ggml_tensor * grad = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
  2446. ggml_set_name(grad, "grad");
  2447. ggml_tensor * grad_m = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
  2448. ggml_set_name(grad_m, "grad_m");
  2449. ggml_tensor * grad_v = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
  2450. ggml_set_name(grad_v, "grad_v");
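// 7 floats of AdamW hyperparameters; presumably (alpha, beta1, beta2, eps, wd, beta1h, beta2h), the layout consumed by ggml_opt_step_adamw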
  2451. ggml_tensor * adamw_params = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 7);
  2452. ggml_set_name(adamw_params, "adamw_params");
  2453. ggml_tensor * out = ggml_opt_step_adamw(ctx, a, grad, grad_m, grad_v, adamw_params);
  2454. ggml_set_name(out, "out");
  2455. return out;
  2456. }
  2457. void initialize_tensors(ggml_context * ctx) override {
  2458. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  2459. init_tensor_uniform(t, 0.0f, 1.0f); // grad_v and adamw_params need non-negative values.
  2460. }
  2461. }
  2462. bool grad_precise() override {
  2463. return true;
  2464. }
  2465. };
  2466. enum llm_norm_type {
  2467. LLM_NORM,
  2468. LLM_NORM_RMS,
  2469. };
  2470. struct llama_hparams {
  2471. uint32_t n_vocab;
  2472. uint32_t n_embd;
  2473. uint32_t n_head;
  2474. uint32_t n_head_kv;
  2475. static constexpr uint32_t n_layer = 1;
  2476. uint32_t n_rot;
  2477. uint32_t n_embd_head; // dimension of values (d_v)
  2478. uint32_t n_ff;
  2479. float f_norm_eps;
  2480. float f_norm_rms_eps;
  2481. // cparams
  2482. static constexpr uint32_t n_ctx = 512; // user-specified context size
  2483. static constexpr uint32_t n_ctx_orig = n_ctx;
  2484. // batch
  2485. int32_t n_tokens;
  2486. // llm_build_context
2487. static constexpr int32_t n_kv = 32; // size of KV cache to consider (n_kv <= n_ctx)
  2488. static constexpr int32_t kv_head = 1; // index of where we store new KV data in the cache
  2489. uint32_t n_embd_gqa() const { // dimension of key embeddings across all k-v heads
  2490. return n_embd_head * n_head_kv;
  2491. }
  2492. };
  2493. // LLM base class
  2494. struct test_llm : public test_case {
  2495. llama_hparams hp;
  2496. protected:
  2497. test_llm(llama_hparams hp)
  2498. : hp(std::move(hp)) {
  2499. }
  2500. public:
  2501. struct ggml_tensor * llm_build_norm(
  2502. struct ggml_context * ctx,
  2503. struct ggml_tensor * cur,
  2504. struct ggml_tensor * mw,
  2505. struct ggml_tensor * mb,
  2506. llm_norm_type type) {
  2507. switch (type) {
  2508. case LLM_NORM: cur = ggml_norm (ctx, cur, hp.f_norm_eps); break;
  2509. case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hp.f_norm_rms_eps); break;
  2510. }
  2511. cur = ggml_mul(ctx, cur, mw);
  2512. if (mb) {
  2513. cur = ggml_add(ctx, cur, mb);
  2514. }
  2515. return cur;
  2516. }
  2517. void llm_build_kv_store(
  2518. struct ggml_context * ctx,
  2519. struct ggml_tensor * k_l,
  2520. struct ggml_tensor * v_l,
  2521. struct ggml_tensor * k_cur,
  2522. struct ggml_tensor * v_cur) {
2523. // compute the transposed [n_tokens, n_embd_gqa] V matrix
  2524. struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, hp.n_embd_gqa(), hp.n_tokens));
  2525. struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, hp.n_tokens*hp.n_embd_gqa(),
  2526. (ggml_row_size(k_l->type, hp.n_embd_gqa()))*hp.kv_head);
  2527. struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, v_l, hp.n_tokens, hp.n_embd_gqa(),
  2528. ( hp.n_ctx)*ggml_element_size(v_l),
  2529. (hp.kv_head)*ggml_element_size(v_l));
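// the V cache is laid out transposed: each of the n_embd_gqa rows spans n_ctx cache slots, and the n_tokens new values per row are written starting at slot kv_head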
  2530. // important: storing RoPE-ed version of K in the KV cache!
  2531. ggml_cpy(ctx, k_cur, k_cache_view);
  2532. ggml_cpy(ctx, v_cur_t, v_cache_view);
  2533. }
  2534. struct ggml_tensor * llm_build_kqv(
  2535. struct ggml_context * ctx,
  2536. struct ggml_tensor * k_l,
  2537. struct ggml_tensor * v_l,
  2538. struct ggml_tensor * q_cur,
  2539. struct ggml_tensor * kq_mask,
  2540. float kq_scale) {
  2541. struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
  2542. struct ggml_tensor * k =
  2543. ggml_view_3d(ctx, k_l,
  2544. hp.n_embd_head, hp.n_kv, hp.n_head_kv,
  2545. ggml_row_size(k_l->type, hp.n_embd_gqa()),
  2546. ggml_row_size(k_l->type, hp.n_embd_head),
  2547. 0);
  2548. struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
  2549. kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, 0.0f);
2550. // split cached v into n_head_kv heads
  2551. struct ggml_tensor * v =
  2552. ggml_view_3d(ctx, v_l,
  2553. hp.n_kv, hp.n_embd_head, hp.n_head_kv,
  2554. ggml_element_size(v_l)*hp.n_ctx,
  2555. ggml_element_size(v_l)*hp.n_ctx*hp.n_embd_head,
  2556. 0);
  2557. struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
  2558. struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
  2559. struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, hp.n_embd_head*hp.n_head, hp.n_tokens);
  2560. struct ggml_tensor * wo = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
  2561. cur = ggml_mul_mat(ctx, wo, cur);
  2562. return cur;
  2563. }
  2564. void initialize_tensors(ggml_context * ctx) override {
  2565. for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
  2566. if (t->type == GGML_TYPE_I32) {
  2567. // pos
  2568. std::vector<int> data(hp.n_tokens);
  2569. for (int i = 0; i < hp.n_tokens; i++) {
  2570. data[i] = rand() % hp.n_ctx;
  2571. }
  2572. ggml_backend_tensor_set(t, data.data(), 0, hp.n_tokens * sizeof(int));
  2573. } else {
  2574. init_tensor_uniform(t);
  2575. }
  2576. }
  2577. }
  2578. };
  2579. // Llama
  2580. struct test_llama : public test_llm {
  2581. static constexpr float freq_base = 10000.0f;
  2582. static constexpr float freq_scale = 1.0f;
  2583. static constexpr float ext_factor = 0.0f;
  2584. static constexpr float attn_factor = 1.0f;
  2585. static constexpr float beta_fast = 32.0f;
  2586. static constexpr float beta_slow = 1.0f;
  2587. std::string op_desc(ggml_tensor * t) override {
  2588. GGML_UNUSED(t);
  2589. return "LLAMA";
  2590. }
  2591. std::string vars() override {
  2592. auto n_tokens = hp.n_tokens;
  2593. return VARS_TO_STR1(n_tokens);
  2594. }
  2595. double max_nmse_err() override {
  2596. return 2e-3;
  2597. }
  2598. test_llama(int n_tokens = 1)
  2599. : test_llm({
  2600. /*n_vocab =*/ 32000,
  2601. /*n_embd =*/ 3200,
  2602. /*n_head =*/ 32,
  2603. /*n_head_kv =*/ 32,
  2604. /*n_rot =*/ 100,
  2605. /*n_embd_head =*/ 100,
  2606. /*n_ff =*/ 8640,
  2607. /*f_norm_eps =*/ 0.f,
  2608. /*f_norm_rms_eps =*/ 1e-5f,
  2609. /*n_tokens =*/ n_tokens,
  2610. }) {
  2611. }
  2612. ggml_tensor * build_graph(ggml_context * ctx) override {
  2613. struct ggml_tensor * cur;
  2614. struct ggml_tensor * inpL;
  2615. inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);
  2616. // inp_pos - contains the positions
  2617. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);
2618. // KQ_mask (mask for 1 head, it will be broadcast to all heads)
  2619. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);
  2620. ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
  2621. ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
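// 1638400 = n_embd_gqa() * n_ctx = 3200 * 512, the per-layer KV cache size for this configuration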
  2622. for (uint32_t il = 0; il < hp.n_layer; ++il) {
  2623. struct ggml_tensor * inpSA = inpL;
  2624. // norm
  2625. ggml_tensor * attn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
  2626. cur = llm_build_norm(ctx, inpL, attn_norm, nullptr, LLM_NORM_RMS);
  2627. // self-attention
  2628. {
  2629. ggml_tensor * wq = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
  2630. ggml_tensor * wk = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());
  2631. ggml_tensor * wv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());
  2632. // compute Q and K and RoPE them
  2633. struct ggml_tensor * Qcur = ggml_mul_mat(ctx, wq, cur);
  2634. struct ggml_tensor * Kcur = ggml_mul_mat(ctx, wk, cur);
  2635. struct ggml_tensor * Vcur = ggml_mul_mat(ctx, wv, cur);
  2636. Qcur = ggml_rope_ext(
  2637. ctx, ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens), inp_pos, nullptr,
  2638. hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
  2639. ext_factor, attn_factor, beta_fast, beta_slow
  2640. );
  2641. Kcur = ggml_rope_ext(
  2642. ctx, ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens), inp_pos, nullptr,
  2643. hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
  2644. ext_factor, attn_factor, beta_fast, beta_slow
  2645. );
  2646. llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);
  2647. cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
  2648. }
  2649. struct ggml_tensor * ffn_inp = ggml_add(ctx, cur, inpSA);
  2650. // feed-forward network
  2651. ggml_tensor * ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
  2652. cur = llm_build_norm(ctx, ffn_inp, ffn_norm, nullptr, LLM_NORM_RMS);
  2653. ggml_tensor * ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
  2654. ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
  2655. ggml_tensor * ffn_up = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
  2656. struct ggml_tensor * tmp = ggml_mul_mat(ctx, ffn_up, cur);
  2657. cur = ggml_mul_mat(ctx, ffn_gate, cur);
  2658. cur = ggml_silu(ctx, cur);
  2659. cur = ggml_mul(ctx, cur, tmp);
  2660. cur = ggml_mul_mat(ctx, ffn_down, cur);
  2661. cur = ggml_add(ctx, cur, ffn_inp);
  2662. // input for next layer
  2663. inpL = cur;
  2664. }
  2665. cur = inpL;
  2666. ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
  2667. cur = llm_build_norm(ctx, cur, output_norm, nullptr, LLM_NORM_RMS);
  2668. // lm_head
  2669. ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_vocab);
  2670. cur = ggml_mul_mat(ctx, output, cur);
  2671. return cur;
  2672. }
  2673. };
  2674. // Falcon
  2675. struct test_falcon : public test_llm {
  2676. static constexpr float freq_base = 10000.0f;
  2677. static constexpr float freq_scale = 1.0f;
  2678. static constexpr float ext_factor = 0.0f;
  2679. static constexpr float attn_factor = 1.0f;
  2680. static constexpr float beta_fast = 32.0f;
  2681. static constexpr float beta_slow = 1.0f;
  2682. std::string op_desc(ggml_tensor * t) override {
  2683. GGML_UNUSED(t);
  2684. return "FALCON";
  2685. }
  2686. std::string vars() override {
  2687. auto n_tokens = hp.n_tokens;
  2688. return VARS_TO_STR1(n_tokens);
  2689. }
  2690. double max_nmse_err() override {
  2691. return 2e-3;
  2692. }
  2693. test_falcon(int n_tokens = 1)
  2694. : test_llm({
  2695. /*n_vocab =*/ 32000,
  2696. /*n_embd =*/ 3200,
  2697. /*n_head =*/ 50,
  2698. /*n_head_kv =*/ 1,
  2699. /*n_rot =*/ 64,
  2700. /*n_embd_head =*/ 64,
  2701. /*n_ff =*/ 8640,
  2702. /*f_norm_eps =*/ 1e-5f,
  2703. /*f_norm_rms_eps =*/ 0.f,
  2704. /*n_tokens =*/ n_tokens,
  2705. }) {
  2706. }
  2707. ggml_tensor * build_graph(ggml_context * ctx) override {
  2708. struct ggml_tensor * cur;
  2709. struct ggml_tensor * inpL;
  2710. inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);
  2711. // inp_pos - contains the positions
  2712. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);
2713. // KQ_mask (mask for 1 head, it will be broadcast to all heads)
  2714. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);
  2715. ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
  2716. ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
  2717. for (uint32_t il = 0; il < hp.n_layer; ++il) {
  2718. // norm
  2719. ggml_tensor * attn_norm_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
  2720. ggml_tensor * attn_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
  2721. ggml_tensor * attn_norm = llm_build_norm(ctx, inpL, attn_norm_w, attn_norm_b, LLM_NORM);
  2722. // self-attention
  2723. {
  2724. cur = attn_norm;
  2725. ggml_tensor * wqkv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd + 2*hp.n_embd_gqa());
  2726. cur = ggml_mul_mat(ctx, wqkv, cur);
  2727. struct ggml_tensor * Qcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd, hp.n_tokens, cur->nb[1], 0*sizeof(float)*(hp.n_embd)));
  2728. struct ggml_tensor * Kcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd)));
  2729. struct ggml_tensor * Vcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd + hp.n_embd_gqa())));
  2730. Qcur = ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens);
  2731. Kcur = ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens);
  2732. // using mode = 2 for neox mode
  2733. Qcur = ggml_rope_ext(
  2734. ctx, Qcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
  2735. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  2736. );
  2737. Kcur = ggml_rope_ext(
  2738. ctx, Kcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
  2739. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  2740. );
  2741. llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);
  2742. cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
  2743. }
  2744. struct ggml_tensor * ffn_inp = cur;
  2745. // feed forward
  2746. {
  2747. ggml_tensor * ffn_up = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
  2748. ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
  2749. cur = attn_norm;
  2750. cur = ggml_mul_mat(ctx, ffn_up, cur);
  2751. cur = ggml_gelu(ctx, cur);
  2752. cur = ggml_mul_mat(ctx, ffn_down, cur);
  2753. }
  2754. cur = ggml_add(ctx, cur, ffn_inp);
  2755. cur = ggml_add(ctx, cur, inpL);
  2756. // input for next layer
  2757. inpL = cur;
  2758. }
  2759. cur = inpL;
  2760. ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
  2761. ggml_tensor * output_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
  2762. cur = llm_build_norm(ctx, cur, output_norm, output_norm_b, LLM_NORM);
  2763. // lm_head
  2764. ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, hp.n_embd, hp.n_vocab);
  2765. cur = ggml_mul_mat(ctx, output, cur);
  2766. return cur;
  2767. }
  2768. };
  2769. // ###########################################
  2770. // ## Section 3: GGML Op Test Instantiation ##
  2771. // ###########################################
  2772. static const ggml_type all_types[] = {
  2773. GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_BF16,
  2774. GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
  2775. GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
  2776. GGML_TYPE_Q8_0,
  2777. GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
  2778. GGML_TYPE_Q4_K, GGML_TYPE_Q5_K,
  2779. GGML_TYPE_Q6_K,
  2780. // GGML_TYPE_TQ1_0, GGML_TYPE_TQ2_0, // TODO: implement for all backends
  2781. GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
  2782. GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
  2783. GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
  2784. };
  2785. static const ggml_type base_types[] = {
  2786. GGML_TYPE_F32, GGML_TYPE_F16,
  2787. GGML_TYPE_Q8_0, // for I8MM tests
  2788. GGML_TYPE_Q4_0,
  2789. GGML_TYPE_Q4_1, // for I8MM tests
  2790. GGML_TYPE_Q4_K,
  2791. GGML_TYPE_IQ2_XXS
  2792. };
  2793. static const ggml_type other_types[] = {
  2794. GGML_TYPE_Q4_1,
  2795. GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
  2796. GGML_TYPE_Q8_0,
  2797. GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
  2798. GGML_TYPE_Q5_K,
  2799. GGML_TYPE_Q6_K,
  2800. // GGML_TYPE_TQ1_0, GGML_TYPE_TQ2_0, // TODO: implement for all backends
  2801. GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
  2802. GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
  2803. GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
  2804. GGML_TYPE_BF16,
  2805. };
  2806. // Test cases for evaluation: should try to cover edge cases while using small input sizes to keep the runtime low
  2807. static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
  2808. std::vector<std::unique_ptr<test_case>> test_cases;
  2809. std::default_random_engine rng(0);
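// rng is seeded deterministically; it is only used by the optional randomized sweeps below (disabled by default)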
  2810. // unary ops
  2811. for (int v : {0, 1}) {
  2812. for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) {
  2813. test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, { 128, 2, 2, 2 }, v));
  2814. test_cases.emplace_back(new test_unary((ggml_unary_op) op, GGML_TYPE_F32, { 5, 7, 11, 13 }, v));
  2815. }
  2816. }
  2817. test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false));
  2818. for (ggml_type type : all_types) {
  2819. for (int b : {1, 7}) {
  2820. for (bool v : {false, true}) {
  2821. test_cases.emplace_back(new test_get_rows(type, 256, 5, 4, b, v));
  2822. }
  2823. }
  2824. }
  2825. for (int b : {1, 7}) {
  2826. for (bool v : {false, true}) {
  2827. test_cases.emplace_back(new test_get_rows(GGML_TYPE_I32, 256, 5, 4, b, v));
  2828. }
  2829. }
  2830. for (ggml_type type_input : {GGML_TYPE_F32}) {
  2831. for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) {
  2832. for (int k0 : {1, 3}) {
  2833. for (int k1 : {1, 3}) {
  2834. for (int s0 : {1, 2}) {
  2835. for (int s1 : {1, 2}) {
  2836. for (int p0 : {0, 1}) {
  2837. for (int p1 : {0, 1}) {
  2838. test_cases.emplace_back(new test_pool2d(pool_type, type_input, {10, 10, 3, 1}, k0, k1, s0, s1, p0, p1));
  2839. }
  2840. }
  2841. }
  2842. }
  2843. }
  2844. }
  2845. }
  2846. }
  2847. // im2col 1D
  2848. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
  2849. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
  2850. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
  2851. for (int s0 : {1, 3}) {
  2852. for (int p0 : {0, 3}) {
  2853. for (int d0 : {1, 3}) {
  2854. test_cases.emplace_back(new test_im2col(
  2855. GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {20, 2, 2, 1}, {3, 2, 2, 1},
  2856. s0, 0, p0, 0, d0, 0, false));
  2857. }
  2858. }
  2859. }
  2860. // im2col 2D
  2861. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32));
  2862. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32));
  2863. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16));
  2864. for (int s0 : {1, 3}) {
  2865. for (int s1 : {1, 3}) {
  2866. for (int p0 : {0, 3}) {
  2867. for (int p1 : {0, 3}) {
  2868. for (int d0 : {1, 3}) {
  2869. for (int d1 : {1, 3}) {
  2870. test_cases.emplace_back(new test_im2col(
  2871. GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {20, 20, 2, 2}, {3, 3, 2, 2},
  2872. s0, s1, p0, p1, d0, d1, true));
  2873. }
  2874. }
  2875. }
  2876. }
  2877. }
  2878. }
  2879. // extra tests for im2col 2D
  2880. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 32}, {3, 3, 1, 32}, 1, 1, 1, 1, 1, 1, true));
  2881. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 32}, {3, 3, 2, 32}, 1, 1, 1, 1, 1, 1, true));
  2882. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 1024}, {3, 3, 1, 1024}, 1, 1, 1, 1, 1, 1, true));
  2883. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 1024}, {3, 3, 2, 1024}, 1, 1, 1, 1, 1, 1, true));
  2884. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 2048}, {3, 3, 1, 2048}, 1, 1, 1, 1, 1, 1, true));
  2885. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 2048}, {3, 3, 2, 2048}, 1, 1, 1, 1, 1, 1, true));
  2886. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 2560}, {3, 3, 1, 2560}, 1, 1, 1, 1, 1, 1, true));
  2887. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 2560}, {3, 3, 2, 2560}, 1, 1, 1, 1, 1, 1, true));
2888. // the sycl backend limits the task global_range to < MAX_INT
2889. // test cases for 2D im2col with large input W and H (occurs in stable-diffusion)
2890. // however, these cases need to allocate more memory, which may fail on some devices (Intel Arc770, etc.)
2891. // these cases are verified to pass on Intel(R) Data Center GPU Max 1100 (sycl backend) and NV A30 (cuda backend)
  2892. // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true));
  2893. // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true));
  2894. test_cases.emplace_back(new test_conv_transpose_1d());
  2895. test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 3, 0, 1));
  2896. test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 2, 0, 1));
  2897. test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 1, 0, 1));
  2898. test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 2, 0, 1));
  2899. test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 1, 0, 1));
  2900. test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1));
  2901. test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1));
  2902. test_cases.emplace_back(new test_count_equal());
  2903. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 1, 1, 1}));
  2904. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {100, 10, 1, 1}));
  2905. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1}));
  2906. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 12, 1, 1}));
  2907. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {2000, 10, 1, 1}));
  2908. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {5438, 3, 1, 1}));
  2909. for (int ne3 : {1, 3}) { // CUDA backward pass only supports ne3 == 1
  2910. test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 1, 1}));
  2911. test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {2, 1, 1, 1}));
  2912. test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 2, 1, 1}));
  2913. test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 2, 1}));
  2914. test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 1, 2}));
  2915. test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, {10, 5, 4, ne3}, {2, 1, 1, 1}));
  2916. test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 5, 4, ne3}, {1, 1, 1, 2}));
  2917. }
  2918. test_cases.emplace_back(new test_dup(GGML_TYPE_F32));
  2919. test_cases.emplace_back(new test_dup(GGML_TYPE_F16));
  2920. test_cases.emplace_back(new test_dup(GGML_TYPE_I32));
  2921. test_cases.emplace_back(new test_dup(GGML_TYPE_I16));
  2922. test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {0, 2, 1, 3}));
  2923. test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {0, 2, 1, 3})); // dup by rows
  2924. test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {1, 0, 2, 3}));
  2925. test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {1, 0, 2, 3})); // dup dst not-contiguous
  2926. test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3}));
  2927. test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3}));
  2928. for (int dim = 1; dim < GGML_MAX_DIMS; ++dim) {
  2929. test_cases.emplace_back(new test_set(GGML_TYPE_F32, GGML_TYPE_F32, {6, 5, 4, 3}, dim));
  2930. }
  2931. for (int dim = 1; dim < GGML_MAX_DIMS; ++dim) {
  2932. test_cases.emplace_back(new test_set(GGML_TYPE_I32, GGML_TYPE_I32, {6, 5, 4, 3}, dim));
  2933. }
  2934. for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
  2935. for (ggml_type type_dst : all_types) {
  2936. test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
  2937. test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows
  2938. }
  2939. }
  2940. for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
  2941. for (ggml_type type_dst : {GGML_TYPE_F16, GGML_TYPE_F32}) {
  2942. test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
  2943. }
  2944. }
  2945. test_cases.emplace_back(new test_cont());
  2946. test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 1 ,1}));
  2947. test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 3 ,5}));
  2948. test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 3, 5 ,7}));
  2949. test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 1, 1 ,1}));
  2950. test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 1, 3 ,5}));
  2951. test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 3, 5 ,7}));
  2952. test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 1, 1 ,1}));
  2953. test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 1, 3 ,5}));
  2954. test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 3, 5 ,7}));
  2955. auto add_test_bin_bcast = [&](ggml_type type, std::array<int64_t, 4> ne, std::array<int, 4> nr) {
  2956. for (auto op : {ggml_add, ggml_mul, ggml_div}) {
  2957. test_cases.emplace_back(new test_bin_bcast(op, type, ne, nr));
  2958. }
  2959. };
  2960. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 8, 1}, {1, 1, 1, 1});
  2961. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1, 1}, {32, 1, 1, 1});
  2962. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 320, 320}, {1, 1, 1, 1});
  2963. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 1, 1}, {1, 1, 1, 1});
  2964. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 1}, {1, 1, 1, 1});
  2965. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {1, 1, 1, 1});
  2966. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {2, 1, 1, 1});
  2967. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {1, 2, 1, 1});
  2968. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {1, 1, 2, 1});
  2969. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {1, 1, 1, 2});
  2970. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {1, 1, 2, 2});
  2971. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {1, 2, 2, 2});
  2972. add_test_bin_bcast(GGML_TYPE_F32, {10, 5, 4, 3}, {2, 2, 2, 2});
  2973. // stable diffusion
  2974. add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 1, 1, 1});
  2975. add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 16, 16, 1});
  2976. add_test_bin_bcast(GGML_TYPE_F32, {1280, 16, 16, 1}, {1, 1, 1, 1});
  2977. add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 256, 1, 1});
  2978. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {16, 16, 1, 1});
  2979. add_test_bin_bcast(GGML_TYPE_F32, {16, 16, 1280, 1}, {1, 1, 1, 1});
  2980. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {16, 16, 1, 1});
  2981. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 2560, 1}, {16, 16, 1, 1});
  2982. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {32, 32, 1, 1});
  2983. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {32, 32, 1, 1});
  2984. add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 640, 1}, {32, 32, 1, 1});
  2985. add_test_bin_bcast(GGML_TYPE_F32, {5120, 1, 1, 1}, {1, 256, 1, 1});
  2986. add_test_bin_bcast(GGML_TYPE_F32, {640, 1, 1, 1}, {1, 1, 1, 1});
  2987. //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {1, 1, 1, 1});
  2988. //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {2, 1, 1, 1});
  2989. test_cases.emplace_back(new test_add1());
  2990. test_cases.emplace_back(new test_scale());
  2991. for (float eps : {1e-6f, 1e-5f, 1e-3f, 1e-1f}) {
  2992. test_cases.emplace_back(new test_norm(GGML_TYPE_F32, {64, 5, 4, 3}, eps));
  2993. test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 5, 4, 3}, eps));
  2994. }
  2995. test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 1, 1}, {4, 1536, 1, 1}));
  2996. test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {8, 1536, 1, 1}, {4, 1536, 1, 1}));
  2997. test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 4, 1}, {4, 1536, 1, 1}));
  2998. test_cases.emplace_back(new test_ssm_scan(GGML_TYPE_F32, 16, 1024, 32, 4));
  2999. test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 1, 1));
  3000. test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 32, 1));
  3001. test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 32, 4));
  3002. test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 128, 4));
  3003. test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 1, 1));
  3004. test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 32, 1));
  3005. test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 32, 4));
  3006. test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 128, 4));
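// mul_mat with small n (1..8) for the common quantized and F16 types, likely exercising the mat-vec / small-batch paths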
  3007. for (int i = 1; i < 9; ++i) {
  3008. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3009. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_0, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3010. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_1, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3011. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q5_0, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3012. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q5_1, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3013. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q8_0, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3014. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_K, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3015. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q5_K, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3016. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q6_K, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3017. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_IQ4_NL, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1}));
  3018. }
  3019. #if 1
  3020. for (ggml_type type_a : base_types) {
  3021. for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
  3022. // test cases without permutation
  3023. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1}, {1, 1}));
  3024. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {1, 1}));
  3025. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {2, 1}));
  3026. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {1, 1}));
  3027. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {2, 1}));
  3028. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {1, 2}));
  3029. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {2, 2}));
  3030. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1, 1}, {1, 1}));
  3031. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 1}, {1, 1}));
  3032. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 1}, {2, 1}));
  3033. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 1}));
  3034. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 1}));
  3035. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 2}));
  3036. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 2}));
  3037. // test cases with permutation
  3038. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 2, 1, 3}));
  3039. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 1, 3, 2}));
  3040. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 3, 2, 1}));
  3041. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 2, 1, 3}));
  3042. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 1, 3, 2}));
  3043. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 3, 2, 1}));
  3044. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 2, 1, 3}));
  3045. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 1, 3, 2}));
  3046. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 3, 2, 1}));
  3047. }
  3048. }
  3049. for (ggml_type type_a : other_types) {
  3050. for (ggml_type type_b : {GGML_TYPE_F32}) {
  3051. if (ggml_blck_size(type_a) != 256) {
  3052. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, ggml_blck_size(type_a), {1, 1}, {1, 1}));
  3053. }
  3054. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1}));
  3055. }
  3056. }
  3057. #else
  3058. // m = a rows
  3059. // n = b rows
  3060. // k = cols
  3061. std::uniform_int_distribution<> dist_m(1, 128);
  3062. std::uniform_int_distribution<> dist_n(16, 128);
  3063. std::uniform_int_distribution<> dist_k(1, 16);
  3064. for (int i = 0; i < 1000; i++) {
  3065. for (ggml_type type_a : all_types) {
  3066. for (ggml_type type_b : {GGML_TYPE_F32}) {
  3067. int m = dist_m(rng);
  3068. int n = dist_n(rng);
  3069. int k = dist_k(rng) * ggml_blck_size(type_a);
  3070. test_cases.emplace_back(new test_mul_mat(type_a, type_b, m, n, k, { 1, 1}, {1, 1}));
  3071. }
  3072. }
  3073. }
  3074. #endif
  3075. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 128, { 8, 1}, {1, 1}));
  3076. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 128, { 8, 1}, {4, 1}));
  3077. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 64, { 8, 1}, {4, 1}));
  3078. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 64, { 8, 1}, {4, 1}));
  3079. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 45, 128, { 8, 1}, {4, 1}));
  3080. test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45, 64, { 8, 1}, {4, 1}));
3081. // the sycl backend limits the task global_range to < MAX_INT
3082. // test case for the f16-type-convert-to-fp32 kernel with large k under fp32 compute dtype (occurs in stable-diffusion)
3083. // however, this case needs to allocate more memory, which may fail on some devices (Intel Arc770, etc.)
3084. // this case is verified to pass on Intel(R) Data Center GPU Max 1100 (sycl backend) and NV A30 (cuda backend)
  3085. // test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F16, 512, 262144, 9216, {1, 1}, {1, 1}));
  3086. for (ggml_type type_a : base_types) {
  3087. for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
  3088. for (int n_mats : {4, 8}) {
  3089. for (int n_used : {1, 2, 4}) {
  3090. for (bool b : {false, true}) {
  3091. for (int n : {1, 32}) {
  3092. int m = 512;
  3093. int k = 256;
  3094. test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
  3095. }
  3096. }
  3097. }
  3098. }
  3099. }
  3100. }
  3101. for (ggml_type type_a : other_types) {
  3102. for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
  3103. for (int n_mats : {4}) {
  3104. for (int n_used : {2}) {
  3105. for (bool b : {false}) {
  3106. for (int n : {1, 32}) {
  3107. int m = 512;
  3108. int k = 256;
  3109. test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
  3110. }
  3111. }
  3112. }
  3113. }
  3114. }
  3115. }
  3116. for (ggml_type type_a : base_types) {
  3117. for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
  3118. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 1, 16, { 1, 1}));
  3119. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 1, 16, {10, 1}));
  3120. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 1, 16, {10, 1}));
  3121. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 1, 16, {10, 10}));
  3122. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 1, 16, {10, 10}));
  3123. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 1, 16, {10, 10}));
  3124. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 1, 16, {10, 10}));
  3125. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, { 1, 1}));
  3126. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, { 1, 1}, true));
  3127. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, {10, 1}));
  3128. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, {10, 1}));
  3129. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, {10, 10}));
  3130. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, {10, 10}));
  3131. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, {10, 10}));
  3132. test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, 16, 16, {10, 10}));
  3133. }
  3134. }
  3135. test_cases.emplace_back(new test_sqr());
  3136. test_cases.emplace_back(new test_sqrt());
  3137. test_cases.emplace_back(new test_log());
  3138. test_cases.emplace_back(new test_sin());
  3139. test_cases.emplace_back(new test_cos());
  3140. test_cases.emplace_back(new test_clamp());
  3141. test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5));
  3142. test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 3, 1}, 5));
  3143. test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 3, 2}, 5));
  3144. #if 0
  3145. std::uniform_int_distribution<> dist_ne1(1, 50);
  3146. int exponent = 1;
  3147. while (exponent < (1 << 17)) {
  3148. std::uniform_int_distribution<> dist_ne0(exponent, 2*exponent);
  3149. for (int n = 0; n < 10; ++n) {
  3150. int64_t ne0 = dist_ne0(rng);
  3151. int64_t ne1 = dist_ne1(rng);
  3152. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {ne0, ne1, 1, 1}, n/2 == 0, 0.1f, ne0 < 1000 ? 4.0f : 0.0f));
  3153. }
  3154. exponent <<= 1;
  3155. }
  3156. #endif
  3157. for (bool mask : {false, true}) {
  3158. for (float max_bias : {0.0f, 8.0f}) {
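// max_bias > 0.0f (ALiBi) is only exercised together with a mask; the mask-less combination is skipped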
  3159. if (!mask && max_bias > 0.0f) continue;
  3160. for (float scale : {1.0f, 0.1f}) {
  3161. for (int64_t ne0 : {16, 1024}) {
  3162. for (int64_t ne1 : {16, 1024}) {
  3163. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, scale, max_bias));
  3164. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, scale, max_bias));
  3165. }
  3166. }
  3167. }
  3168. }
  3169. }
  3170. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, 0.1f, 0.0f));
  3171. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f));
  3172. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 0.0f));
  3173. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 8.0f));
  3174. for (bool fw : {true, false}) { // fw == forward
  3175. bool all = true;
  3176. for (float v : { 0, 1 }) {
  3177. for (float fs : { 1.0f, 1.4245f }) {
  3178. for (float ef : { 0.0f, 0.7465f }) {
  3179. for (float af : { 1.0f, 1.4245f }) {
  3180. for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
  3181. for (bool ff : {false, true}) { // freq_factors
  3182. test_cases.emplace_back(new test_rope(type, {128, 32, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 7B
  3183. if (all) {
  3184. test_cases.emplace_back(new test_rope(type, {128, 40, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 13B
  3185. test_cases.emplace_back(new test_rope(type, {128, 52, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 30B
  3186. test_cases.emplace_back(new test_rope(type, {128, 64, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 65B
  3187. }
  3188. if (all) {
  3189. test_cases.emplace_back(new test_rope(type, { 64, 1, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 7B)
  3190. test_cases.emplace_back(new test_rope(type, { 64, 71, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 7B)
  3191. test_cases.emplace_back(new test_rope(type, { 64, 8, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 40B)
  3192. test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1}, 20, 2, 512, fs, ef, af, ff, v, fw)); // neox (stablelm)
  3193. test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1}, 32, 2, 512, fs, ef, af, ff, v, fw)); // neox (phi-2)
  3194. }
  3195. if (all) {
  3196. test_cases.emplace_back(new test_rope(type, {128, 12, 2, 1}, 128, GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl 2B)
  3197. test_cases.emplace_back(new test_rope(type, {128, 28, 2, 1}, 128, GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl 7B)
  3198. test_cases.emplace_back(new test_rope(type, { 80, 16, 2, 1}, 80, GGML_ROPE_TYPE_VISION, 512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl ViT)
  3199. }
  3200. test_cases.emplace_back(new test_rope(type, { 64, 128, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 40B)
  3201. }
  3202. }
  3203. all = false;
  3204. }
  3205. }
  3206. }
  3207. }
  3208. }
  3209. for (int v : { 0, 1, 2, 3 }) {
  3210. for (int dim : { 0, 1, 2, 3, }) {
  3211. test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v));
  3212. test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v));
  3213. }
  3214. }
  3215. for (ggml_sort_order order : {GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC}) {
  3216. test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {8, 1, 1, 1}, order));
  3217. test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order));
  3218. test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {60, 10, 10, 10}, order)); // qwen
  3219. }
  3220. test_cases.emplace_back(new test_sum());
  3221. test_cases.emplace_back(new test_sum_rows());
  3222. test_cases.emplace_back(new test_mean());
  3223. test_cases.emplace_back(new test_upscale());
  3224. test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 1 }, 2, true));
  3225. test_cases.emplace_back(new test_upscale_ext());
  3226. test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, {64, 64, 320, 1}));
  3227. test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, {9, 9, 1280, 1}));
  3228. test_cases.emplace_back(new test_acc());
  3229. test_cases.emplace_back(new test_pad());
  3230. test_cases.emplace_back(new test_pad_reflect_1d());
  3231. test_cases.emplace_back(new test_arange());
  3232. test_cases.emplace_back(new test_timestep_embedding());
  3233. test_cases.emplace_back(new test_leaky_relu());
  3234. for (int hs : { 64, 80, 128, 256, }) {
  3235. for (bool mask : { true, false } ) {
  3236. for (float max_bias : { 0.0f, 8.0f }) {
  3237. if (!mask && max_bias > 0.0f) continue;
  3238. for (float logit_softcap : {0.0f, 10.0f}) {
  3239. if (hs != 128 && logit_softcap != 0.0f) continue;
  3240. for (int nh : { 32, }) {
  3241. for (int kv : { 512, 1024, }) {
  3242. for (int nb : { 1, 3, 32, 35, }) {
  3243. for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) {
  3244. test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, mask, max_bias, logit_softcap, type_KV));
  3245. }
  3246. }
  3247. }
  3248. }
  3249. }
  3250. }
  3251. }
  3252. }
  3253. test_cases.emplace_back(new test_cross_entropy_loss());
  3254. test_cases.emplace_back(new test_opt_step_adamw(GGML_TYPE_F32, {10, 5, 4, 3}));
  3255. // these tests are disabled to save execution time, but they can be handy for debugging
  3256. #if 0
  3257. test_cases.emplace_back(new test_llama(1));
  3258. test_cases.emplace_back(new test_llama(2));
  3259. test_cases.emplace_back(new test_falcon(1));
  3260. test_cases.emplace_back(new test_falcon(2));
  3261. #endif
  3262. return test_cases;
  3263. }
  3264. // Test cases for performance evaluation: should be representative of real-world use cases
  3265. static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {
  3266. std::vector<std::unique_ptr<test_case>> test_cases;
  3267. test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {4096, 1, 1, 1}, {1, 1, 1, 1}));
  3268. test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {4096, 1, 1, 1}, {1, 512, 1, 1}));
  3269. test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F16, {512, 3072, 1, 1}));
  3270. test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {8192, 512, 2, 1}, {0, 2, 1, 3}));
  3271. test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {3072, 512, 2, 1}, {0, 2, 1, 3}));
  3272. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, 1.0f, 0.0f));
  3273. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, 1.0f, 0.0f));
  3274. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, 1.0f, 0.0f));
  3275. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, 1.0f, 0.0f));
  3276. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, 1.0f, 0.0f));
  3277. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, 1.0f, 0.0f));
  3278. test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, 1.0f, 0.0f));
  3279. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 10, 1, 1}));
  3280. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1}));
  3281. test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1}));
  3282. for (int bs : {1, 2, 3, 4, 5, 8, 512}) {
  3283. for (ggml_type type_a : all_types) {
  3284. for (ggml_type type_b : {GGML_TYPE_F32}) {
  3285. test_cases.emplace_back(new test_mul_mat(type_a, type_b, 4096, bs, 14336, {1, 1}, {1, 1}));
  3286. }
  3287. }
  3288. }
  3289. for (int K : {3, 5}) {
  3290. for (int IC : {256, 2560}) {
  3291. for (int IW_IH : {32, 64, 256}) {
  3292. if (IC == 2560 && IW_IH == 256) {
  3293. // too big
  3294. continue;
  3295. }
  3296. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {IW_IH, IW_IH, IC, 1}, {K, K, IC, 1}, 1, 1, 1, 1, 1, 1, true));
  3297. }
  3298. }
  3299. }
  3300. return test_cases;
  3301. }
  3302. static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
  3303. if (mode == MODE_TEST) {
  3304. auto test_cases = make_test_cases_eval();
  3305. ggml_backend_t backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, NULL);
  3306. if (backend_cpu == NULL) {
  3307. printf(" Failed to initialize CPU backend\n");
  3308. return false;
  3309. }
  3310. size_t n_ok = 0;
  3311. for (auto & test : test_cases) {
  3312. if (test->eval(backend, backend_cpu, op_name)) {
  3313. n_ok++;
  3314. }
  3315. }
  3316. printf(" %zu/%zu tests passed\n", n_ok, test_cases.size());
  3317. ggml_backend_free(backend_cpu);
  3318. return n_ok == test_cases.size();
  3319. }
  3320. if (mode == MODE_GRAD) {
  3321. auto test_cases = make_test_cases_eval();
  3322. size_t n_ok = 0;
  3323. for (auto & test : test_cases) {
  3324. if (test->eval_grad(backend, op_name)) {
  3325. n_ok++;
  3326. }
  3327. }
  3328. printf(" %zu/%zu tests passed\n", n_ok, test_cases.size());
  3329. return n_ok == test_cases.size();
  3330. }
  3331. if (mode == MODE_PERF) {
  3332. auto test_cases = make_test_cases_perf();
  3333. for (auto & test : test_cases) {
  3334. test->eval_perf(backend, op_name);
  3335. }
  3336. return true;
  3337. }
  3338. GGML_ABORT("fatal error");
  3339. }
  3340. static void usage(char ** argv) {
  3341. printf("Usage: %s [mode] [-o op] [-b backend]\n", argv[0]);
  3342. printf(" valid modes:\n");
  3343. printf(" - test (default, compare with CPU backend for correctness)\n");
  3344. printf(" - grad (compare gradients from backpropagation with method of finite differences)\n");
  3345. printf(" - perf (performance evaluation)\n");
  3346. printf(" op names for -o are as given by ggml_op_desc() (e.g. ADD, MUL_MAT, etc)\n");
  3347. }
  3348. int main(int argc, char ** argv) {
  3349. test_mode mode = MODE_TEST;
  3350. const char * op_name_filter = NULL;
  3351. const char * backend_filter = NULL;
  3352. for (int i = 1; i < argc; i++) {
  3353. if (strcmp(argv[i], "test") == 0) {
  3354. mode = MODE_TEST;
  3355. } else if (strcmp(argv[i], "perf") == 0) {
  3356. mode = MODE_PERF;
  3357. } else if (strcmp(argv[i], "grad") == 0) {
  3358. mode = MODE_GRAD;
  3359. } else if (strcmp(argv[i], "-o") == 0) {
  3360. if (i + 1 < argc) {
  3361. op_name_filter = argv[++i];
  3362. } else {
  3363. usage(argv);
  3364. return 1;
  3365. }
  3366. } else if (strcmp(argv[i], "-b") == 0) {
  3367. if (i + 1 < argc) {
  3368. backend_filter = argv[++i];
  3369. } else {
  3370. usage(argv);
  3371. return 1;
  3372. }
  3373. } else {
  3374. usage(argv);
  3375. return 1;
  3376. }
  3377. }
  3378. // load and enumerate backends
  3379. ggml_backend_load_all();
  3380. printf("Testing %zu devices\n\n", ggml_backend_dev_count());
  3381. size_t n_ok = 0;
  3382. for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
  3383. ggml_backend_dev_t dev = ggml_backend_dev_get(i);
  3384. printf("Backend %zu/%zu: %s\n", i + 1, ggml_backend_dev_count(), ggml_backend_dev_name(dev));
  3385. if (backend_filter != NULL && strcmp(backend_filter, ggml_backend_dev_name(dev)) != 0) {
  3386. printf(" Skipping\n");
  3387. n_ok++;
  3388. continue;
  3389. }
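// unless a backend was requested explicitly, skip the CPU backend: in test mode it is the reference the other backends are compared against (grad mode still runs on it, as it uses finite differences)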
  3390. if (backend_filter == NULL && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU && mode != MODE_GRAD) {
  3391. printf(" Skipping CPU backend\n");
  3392. n_ok++;
  3393. continue;
  3394. }
  3395. ggml_backend_t backend = ggml_backend_dev_init(dev, NULL);
  3396. GGML_ASSERT(backend != NULL);
  3397. ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
  3398. auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
  3399. if (ggml_backend_set_n_threads_fn) {
  3400. // TODO: better value for n_threads
  3401. ggml_backend_set_n_threads_fn(backend, std::thread::hardware_concurrency());
  3402. }
  3403. printf(" Device description: %s\n", ggml_backend_dev_description(dev));
  3404. size_t free, total; // NOLINT
  3405. ggml_backend_dev_memory(dev, &free, &total);
  3406. printf(" Device memory: %zu MB (%zu MB free)\n", total / 1024 / 1024, free / 1024 / 1024);
  3407. printf("\n");
  3408. bool ok = test_backend(backend, mode, op_name_filter);
  3409. printf(" Backend %s: ", ggml_backend_name(backend));
  3410. if (ok) {
  3411. printf("\033[1;32mOK\033[0m\n");
  3412. n_ok++;
  3413. } else {
  3414. printf("\033[1;31mFAIL\033[0m\n");
  3415. }
  3416. printf("\n");
  3417. ggml_backend_free(backend);
  3418. }
  3419. ggml_quantize_free();
  3420. printf("%zu/%zu backends passed\n", n_ok, ggml_backend_dev_count());
  3421. if (n_ok != ggml_backend_dev_count()) {
  3422. printf("\033[1;31mFAIL\033[0m\n");
  3423. return 1;
  3424. }
  3425. printf("\033[1;32mOK\033[0m\n");
  3426. return 0;
  3427. }