// test-backend-ops.cpp
// This file defines tests for various GGML ops and backends.
// For the forward pass it asserts that the results of multiple backends computing the same GGML ops are consistent.
// For the backward pass it asserts that the gradients from backpropagation are consistent
// with the gradients obtained via the method of finite differences ("grad" mode, this is optional).
// It is also possible to check the performance ("perf" mode).
//
// This file has three sections: section 1 does general setup, section 2 defines the GGML ops to be tested,
// and section 3 defines which tests to run.
// Quick start for adding a new GGML op: go to section 2 and create a struct that inherits from test_case,
// then go to section 3 and add an instantiation of your struct.
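//
// Typical invocation is one mode plus optional filters; a rough sketch (the exact flag
// names are an assumption here, check the argument parsing for the authoritative list):
//   test-backend-ops test -o ADD    # correctness: compare backends against each other, only the ADD op
//   test-backend-ops perf -b CUDA0  # performance: time the ops on one backend
//   test-backend-ops grad           # gradients: backprop vs. finite differences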
// ##############################
// ## Section 1: General Setup ##
// ##############################

#include <ggml.h>
#include <ggml-alloc.h>
#include <ggml-backend.h>
#include <ggml-cpp.h>

#include <algorithm>
#include <array>
#include <cfloat>
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <future>
#include <memory>
#include <random>
#include <regex>
#include <string>
#include <thread>
#include <vector>

static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
    size_t nels = ggml_nelements(tensor);
    std::vector<float> data(nels);
    {
        // parallel initialization
        static const size_t n_threads = std::thread::hardware_concurrency();
        // static RNG initialization (revisit if n_threads stops being constant)
        static std::vector<std::default_random_engine> generators = []() {
            std::random_device rd;
            std::vector<std::default_random_engine> vec;
            vec.reserve(n_threads);
            //for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
            for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
            return vec;
        }();

        auto init_thread = [&](size_t ith, size_t start, size_t end) {
            std::uniform_real_distribution<float> distribution(min, max);
            auto & gen = generators[ith];
            for (size_t i = start; i < end; i++) {
                data[i] = distribution(gen);
            }
        };

        std::vector<std::future<void>> tasks;
        tasks.reserve(n_threads);
        for (size_t i = 0; i < n_threads; i++) {
            size_t start = i*nels/n_threads;
            size_t end   = (i+1)*nels/n_threads;
            tasks.push_back(std::async(std::launch::async, init_thread, i, start, end));
        }
        for (auto & t : tasks) {
            t.get();
        }
    }

    if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
        ggml_backend_tensor_set(tensor, data.data(), 0, nels * sizeof(float));
    } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
        GGML_ASSERT(nels % ggml_blck_size(tensor->type) == 0);

        // dummy importance matrix
        std::vector<float> imatrix(tensor->ne[0], 1.0f);
        const float * im = imatrix.data();
        if (!ggml_quantize_requires_imatrix(tensor->type)) {
            // when the imatrix is optional, we want to test both quantization with and without imatrix
            // use one of the random numbers to decide
            if (data[0] > 0.5f*(min + max)) {
                im = nullptr;
            }
        }

        std::vector<uint8_t> dataq(ggml_row_size(tensor->type, nels));
        {
            // parallel quantization by block
            size_t blck_size = ggml_blck_size(tensor->type);
            size_t n_blocks  = nels / blck_size;

            auto quantize_thread = [&](size_t start, size_t end) {
                ggml_quantize_chunk(tensor->type, data.data(), dataq.data(),
                    start * blck_size, end - start, blck_size, im);
            };

            const size_t min_blocks_per_thread = 1;
            const size_t n_threads = std::min<size_t>(std::thread::hardware_concurrency()/2,
                                                      std::max<size_t>(1, n_blocks / min_blocks_per_thread));
            std::vector<std::future<void>> tasks;
            tasks.reserve(n_threads);
            for (size_t i = 0; i < n_threads; i++) {
                size_t start = i*n_blocks/n_threads;
                size_t end   = (i+1)*n_blocks/n_threads;
                tasks.push_back(std::async(std::launch::async, quantize_thread, start, end));
            }
            for (auto & t : tasks) {
                t.get();
            }
        }
        ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
    } else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
        // This is going to create some weird integers though.
        ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
    } else if (tensor->type == GGML_TYPE_I64) {
        // Integers with a size of 8 bytes can be set by mirroring the float data; the specific values are again not really meaningful.
        const size_t nbytes_half = ggml_nbytes(tensor)/2;
        ggml_backend_tensor_set(tensor, data.data(), 0*nbytes_half, nbytes_half);
        ggml_backend_tensor_set(tensor, data.data(), 1*nbytes_half, nbytes_half);
    } else {
        GGML_ABORT("fatal error");
    }
}
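
// Tests that need a different value range can call init_tensor_uniform directly from an
// initialize_tensors override, e.g. init_tensor_uniform(t, -150.f, 150.f) as done by
// test_unary below to probe an extended range of inputs for NaNs.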

static std::vector<float> tensor_to_float(const ggml_tensor * t) {
    std::vector<float> tv;
    tv.reserve(ggml_nelements(t));

    std::vector<uint8_t> buf(ggml_nbytes(t));
    ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));

    const auto * tt = ggml_get_type_traits(t->type);
    size_t bs = ggml_blck_size(t->type);
    std::vector<float> vq(ggml_blck_size(t->type));
    bool quantized = ggml_is_quantized(t->type);

    // access elements by index to avoid gaps in views
    for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
        for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
            for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
                for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
                    size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
                    if (t->type == GGML_TYPE_F16) {
                        tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_BF16) {
                        tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_F32) {
                        tv.push_back(*(float *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I64) {
                        tv.push_back((float)*(int64_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I32) {
                        tv.push_back((float)*(int32_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I16) {
                        tv.push_back((float)*(int16_t *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I8) {
                        tv.push_back((float)*(int8_t *) &buf[i]);
                    } else if (quantized) {
                        tt->to_float(&buf[i], vq.data(), bs);
                        tv.insert(tv.end(), vq.begin(), vq.end());
                    } else {
                        GGML_ABORT("fatal error");
                    }
                }
            }
        }
    }

    return tv;
}

// normalized mean squared error = mse(a, b) / mse(a, 0)
static double nmse(const float * a, const float * b, size_t n) {
    double mse_a_b = 0.0;
    double mse_a_0 = 0.0;

    for (size_t i = 0; i < n; i++) {
        float a_i = a[i];
        float b_i = b[i];

        mse_a_b += (a_i - b_i) * (a_i - b_i);
        mse_a_0 += a_i * a_i;
    }

    return mse_a_b / mse_a_0;
}
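
// Worked example: for a = {1, 2} and b = {1, 1}:
//   mse(a, b) = (1-1)^2 + (2-1)^2 = 1,  mse(a, 0) = 1^2 + 2^2 = 5,  nmse = 1/5 = 0.2
// Scaling a and b by a common factor leaves the result unchanged, which makes this a
// scale-free error measure for comparing outputs across backends.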

// mean absolute asymmetry between a and b
// asymmetry: (a - b) / (a + b)
// This is more stable than relative error if one of the values fluctuates towards zero.
// n: number of values to compare.
// expected_vals: optional vector of expected values for a. If expected_vals is not empty, filter out all comparisons where
//     a does not match any of the expected values. Needed for noncontinuous gradients where the numerical calculation can fail.
static double mean_abs_asymm(const float * a, const float * b, const size_t n, const std::vector<float> & expected_vals) {
    double sum = 0.0f;

    size_t nvalid = 0;
    for (size_t i = 0; i < n; i++) {
        if (!expected_vals.empty()) {
            bool matches_any = false;
            for (const float & ev : expected_vals) {
                if (fabsf(a[i] - ev) < 1e-3f) {
                    matches_any = true;
                    break;
                }
            }
            if (!matches_any) {
                continue;
            }
        }

        const float asymm = (a[i] - b[i]) / (a[i] + b[i]);
        sum += fabsf(asymm);
        nvalid++;
    }

    return sum/nvalid;
}
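
// Worked example: a = 2.0, b = 1.0 gives |asymm| = |(2-1)/(2+1)| = 1/3 ≈ 0.33, and the result
// stays bounded even as one value approaches zero, unlike the relative error (a-b)/b.
// With expected_vals = {0.0f, 1.0f} (as used for ReLU gradients below), every index where the
// numerical gradient a[i] is not within 1e-3 of 0 or 1 is excluded from the mean.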

// utils for printing the variables of the test cases

template<typename T>
static std::string var_to_str(const T & x) {
    return std::to_string(x);
}

template<typename T, size_t N>
static std::string var_to_str(const T (&x)[N]) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

template<typename T, size_t N>
static std::string var_to_str(const std::array<T, N> & x) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

static std::string var_to_str(ggml_type type) {
    return ggml_type_name(type);
}

static std::string var_to_str(ggml_prec prec) {
    return prec == GGML_PREC_F32 ? "f32" : "def";
}

static std::string var_to_str(ggml_op_pool pool) {
    switch (pool) {
        case GGML_OP_POOL_AVG: return "avg";
        case GGML_OP_POOL_MAX: return "max";
        default: return std::to_string(pool);
    }
}

static std::string var_to_str(ggml_scale_mode mode) {
    switch (mode) {
        case GGML_SCALE_MODE_NEAREST:  return "nearest";
        case GGML_SCALE_MODE_BILINEAR: return "bilinear";
        default: return std::to_string(mode);
    }
}

#define VAR_TO_STR(x) (#x "=" + var_to_str(x))

#define VARS_TO_STR1(a) VAR_TO_STR(a)
#define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
#define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
#define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
#define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
#define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
#define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
#define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
#define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k) VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
#define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
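
// Example expansion: with type = GGML_TYPE_F32 and ne = {10, 5, 4, 3},
// VARS_TO_STR2(type, ne) evaluates to the std::string "type=f32,ne=[10,5,4,3]",
// which is what the test_case::vars() implementations below return for the info prints.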

#ifdef GGML_USE_SYCL
static bool inline _isinf(float f) {
    return (*(uint32_t *)&f & 0x7fffffff) == 0x7f800000;
}
#else
static bool inline _isinf(float f) { return std::isinf(f); }
#endif

// accept FLT_MAX as infinity
static bool isinf_or_max(float f) {
    return _isinf(f) || f == FLT_MAX || f == -FLT_MAX;
}
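
// (Presumably this is so that backends which saturate to FLT_MAX instead of producing a true
// infinity, e.g. after an overflow in reduced precision, are not flagged as mismatches.)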

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

enum test_mode {
    MODE_TEST,
    MODE_PERF,
    MODE_GRAD,
};
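
// Each mode maps to one test_case entry point: MODE_TEST -> eval() (compare the results of two
// backends), MODE_PERF -> eval_perf() (measure throughput on one backend), and MODE_GRAD ->
// eval_grad() (compare backprop gradients against finite differences).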

struct test_case {
    virtual ~test_case() {}

    virtual std::string op_desc(ggml_tensor * t) {
        return ggml_op_desc(t);
    }

    virtual std::string vars() {
        return "";
    }

    virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;

    virtual double max_nmse_err() {
        return 1e-7;
    }

    virtual double max_maa_err() {
        return 1e-4;
    }

    virtual float grad_eps() {
        return 1e-1f;
    }

    // If false, estimate the gradient with 2 points, neglecting 3rd order derivatives and higher.
    // If true, estimate the gradient with 4 points, neglecting 5th order derivatives and higher.
    virtual bool grad_precise() {
        return false;
    }
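
    // Concretely, with eps = grad_eps(), the two estimators used in eval_grad() below are:
    //   2-point: df/dx ≈ (f(x+eps) - f(x-eps)) / (2*eps)
    //   4-point: df/dx ≈ (8*f(x+eps/2) - 8*f(x-eps/2) + f(x-eps) - f(x+eps)) / (6*eps)
    // The 4-point form cancels the leading error term, at the cost of two extra graph
    // computations per checked element.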

    // Skip gradient checks if total number of gradients to be checked is larger than this (to speed up the tests).
    virtual int64_t grad_nmax() {
        return 10000;
    }

    // No effect if empty.
    // If not empty, skip all gradient checks where the numerical result does not match any of the values.
    // Needed for dealing with noncontinuous gradients (e.g. ReLU) where estimation using finite differences is unreliable.
    virtual std::vector<float> grad_expect() {
        return {};
    }

    virtual void initialize_tensors(ggml_context * ctx) {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t);
        }
    }

    virtual size_t op_size(ggml_tensor * t) {
        size_t size = ggml_nbytes(t);
        // add source tensors
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            if (t->src[i] != NULL) {
                size += ggml_nbytes(t->src[i]);
            }
        }
        return size;
    }

    virtual uint64_t op_flops(ggml_tensor * t) {
        GGML_UNUSED(t);
        return 0;
    }

    virtual bool run_whole_graph() { return false; }

    ggml_cgraph * gf = nullptr;
    ggml_cgraph * gb = nullptr;

    static const int sentinel_size = 1024;

    test_mode mode;

    std::vector<ggml_tensor *> sentinels;

    void add_sentinel(ggml_context * ctx) {
        if (mode == MODE_PERF || mode == MODE_GRAD) {
            return;
        }
        ggml_tensor * sentinel = ::ggml_new_tensor_1d(ctx, GGML_TYPE_F32, sentinel_size);
        ggml_format_name(sentinel, "sent_%zu", sentinels.size());
        sentinels.push_back(sentinel);
    }

    // hijack ggml_new_tensor to add sentinels after each tensor to check for overflows in the backend
    ggml_tensor * ggml_new_tensor(ggml_context * ctx, ggml_type type, int n_dims, const int64_t * ne) {
        ggml_tensor * t = ::ggml_new_tensor(ctx, type, n_dims, ne);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_1d(ggml_context * ctx, ggml_type type, int64_t ne0) {
        ggml_tensor * t = ::ggml_new_tensor_1d(ctx, type, ne0);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_2d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1) {
        ggml_tensor * t = ::ggml_new_tensor_2d(ctx, type, ne0, ne1);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_3d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) {
        ggml_tensor * t = ::ggml_new_tensor_3d(ctx, type, ne0, ne1, ne2);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
        ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
        add_sentinel(ctx);
        return t;
    }
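
    // How the sentinels work: the wrappers above shadow the global ggml_new_tensor* functions inside
    // test_case, so every tensor created by a build_graph() implementation gets a 1024-float sentinel
    // tensor allocated right after it. eval() additionally inserts the sentinels as graph nodes, and
    // the comparison callback checks that their contents are bit-identical between the two backends,
    // so an out-of-bounds write by one backend's kernel surfaces as a "sentinel mismatch" failure.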

    bool eval(ggml_backend_t backend1, ggml_backend_t backend2, const char * op_name) {
        mode = MODE_TEST;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);
        GGML_ASSERT(ctx);

        gf = ggml_new_graph(ctx);

        // pre-graph sentinel
        add_sentinel(ctx);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if the backends support the ops
        bool supported = true;
        for (ggml_backend_t backend : {backend1, backend2}) {
            for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
                if (!ggml_backend_supports_op(backend, t)) {
                    printf("not supported [%s] ", ggml_backend_name(backend));
                    supported = false;
                    break;
                }
            }
        }
        if (!supported) {
            printf("\n");
            ggml_free(ctx);
            return true;
        }

        // post-graph sentinel
        add_sentinel(ctx);

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
        if (buf == NULL) {
            printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
            ggml_free(ctx);
            return false;
        }

        // build graph
        ggml_build_forward_expand(gf, out);

        // add sentinels as graph nodes so that they are checked in the callback
        for (ggml_tensor * sentinel : sentinels) {
            ggml_graph_add_node(gf, sentinel);
        }

        // randomize tensors
        initialize_tensors(ctx);

        // compare
        struct callback_userdata {
            bool   ok;
            double max_err;
            ggml_backend_t backend1;
            ggml_backend_t backend2;
        };

        callback_userdata ud {
            true,
            max_nmse_err(),
            backend1,
            backend2
        };

        auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
            callback_userdata * ud = (callback_userdata *) user_data;
            const char * bn1 = ggml_backend_name(ud->backend1);
            const char * bn2 = ggml_backend_name(ud->backend2);

            if (t1->op == GGML_OP_NONE) {
                // sentinels must be unchanged
                std::vector<uint8_t> t1_data(ggml_nbytes(t1));
                std::vector<uint8_t> t2_data(ggml_nbytes(t2));
                ggml_backend_tensor_get(t1, t1_data.data(), 0, ggml_nbytes(t1));
                ggml_backend_tensor_get(t2, t2_data.data(), 0, ggml_nbytes(t2));

                if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
                    printf("sentinel mismatch: %s ", t1->name);
                    ud->ok = false;
                    return true;
                }
            }

            std::vector<float> f1 = tensor_to_float(t1);
            std::vector<float> f2 = tensor_to_float(t2);

            for (size_t i = 0; i < f1.size(); i++) {
                // check for nans
                if (std::isnan(f1[i]) || std::isnan(f2[i])) {
                    printf("[%s] NaN at index %zu (%s=%f %s=%f) ", ggml_op_desc(t1), i, bn1, f1[i], bn2, f2[i]);
                    ud->ok = false;
                    return true;
                }
                // check for infs: both must be inf of the same sign, or both must be finite
                if (isinf_or_max(f1[i]) || isinf_or_max(f2[i])) {
                    if (isinf_or_max(f1[i]) && isinf_or_max(f2[i])) {
                        if (std::signbit(f1[i]) != std::signbit(f2[i])) {
                            printf("[%s] inf sign mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                            ud->ok = false;
                            return true;
                        }
                    } else {
                        printf("[%s] inf mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
                        ud->ok = false;
                        return true;
                    }
                }
            }

            double err = nmse(f1.data(), f2.data(), f1.size());
            if (err > ud->max_err) {
                printf("[%s] NMSE = %.9f > %.9f ", ggml_op_desc(t1), err, ud->max_err);
                //for (int i = 0; i < (int) f1.size(); i++) {
                //    printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
                //}
                //printf("\n");
                //exit(1);
                ud->ok = false;
            }
            return true;

            GGML_UNUSED(index);
        };

        const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud, run_whole_graph() ? out : nullptr);

        if (!cmp_ok) {
            printf("compare failed ");
        }

        ggml_backend_buffer_free(buf);
        ggml_free(ctx);

        if (ud.ok && cmp_ok) {
            printf("\033[1;32mOK\033[0m\n");
            return true;
        }
        printf("\033[1;31mFAIL\033[0m\n");
        return false;
    }

    bool eval_perf(ggml_backend_t backend, const char * op_name) {
        mode = MODE_PERF;

        static const size_t graph_nodes = 8192;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context_ptr ctx(ggml_init(params)); // smart ptr
        GGML_ASSERT(ctx);

        ggml_tensor * out = build_graph(ctx.get());
        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            return true;
        }

        int len = printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if backends support op
        if (!ggml_backend_supports_op(backend, out)) {
            printf("not supported\n");
            return true;
        }

        // align while also leaving some margin for variations in parameters
        int align = 8;
        int last = (len + align - 1) / align * align;
        if (last - len < 5) {
            last += align;
        }
        printf("%*s", last - len, "");

        // allocate
        ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); // smart ptr
        if (buf == NULL) {
            printf("failed to allocate tensors\n");
            return false;
        }

        // randomize tensors
        initialize_tensors(ctx.get());

        // build graph
        ggml_cgraph * gf = ggml_new_graph_custom(ctx.get(), graph_nodes, false);
        ggml_build_forward_expand(gf, out);

        // warmup run
        ggml_status status = ggml_backend_graph_compute(backend, gf);
        if (status != GGML_STATUS_SUCCESS) {
            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
            return false;
        }

        // determine number of runs
        int n_runs;
        bool is_cpu = ggml_backend_dev_type(ggml_backend_get_device(backend)) == GGML_BACKEND_DEVICE_TYPE_CPU;
        if (op_flops(out) > 0) {
            // based on flops
            const uint64_t GFLOP = 1000 * 1000 * 1000;
            const uint64_t target_flops_cpu =   8ULL * GFLOP;
            const uint64_t target_flops_gpu = 100ULL * GFLOP;
            uint64_t target_flops = is_cpu ? target_flops_cpu : target_flops_gpu;
            n_runs = std::min<int>(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_flops / op_flops(out)) + 1;
        } else {
            // based on memory size
            const size_t GB = 1ULL << 30;
            const size_t target_size_cpu =  8 * GB;
            const size_t target_size_gpu = 32 * GB;
            size_t target_size = is_cpu ? target_size_cpu : target_size_gpu;
            n_runs = std::min<int>(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_size / op_size(out)) + 1;
        }

        // duplicate the op
        for (int i = 1; i < n_runs; i++) {
            ggml_graph_add_node(gf, out);
        }

        // calculate memory
        size_t mem = n_runs * op_size(out);
        auto tensor_op_size = [](ggml_tensor * t) {
            size_t size = ggml_nbytes(t);
            // add source tensors
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                if (t->src[i] != NULL) {
                    size += ggml_nbytes(t->src[i]);
                }
            }
            return size;
        };
        for (int i = 0; i < ggml_graph_n_nodes(gf); ++i) {
            if (ggml_is_view_op(ggml_graph_node(gf, i)->op) || ggml_graph_node(gf, i) == out) {
                continue;
            }
            mem += tensor_op_size(ggml_graph_node(gf, i));
        }

        // run
        int64_t total_time_us = 0;
        int64_t total_mem     = 0;
        int     total_runs    = 0;
        do {
            int64_t start_time = ggml_time_us();
            ggml_status status = ggml_backend_graph_compute(backend, gf);
            if (status != GGML_STATUS_SUCCESS) {
                fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
                return false;
            }
            int64_t end_time = ggml_time_us();

            total_time_us += end_time - start_time;
            total_mem     += mem;
            total_runs    += n_runs;
        } while (total_time_us < 1000*1000); // run for at least 1 second

        printf(" %8d runs - %8.2f us/run - ",
            total_runs,
            (double)total_time_us / total_runs);

        if (op_flops(out) > 0) {
            double flops_per_sec = (op_flops(out) * total_runs) / (total_time_us / 1e6);
            auto format_flops = [](double flops) -> std::string {
                char buf[256];
                if (flops >= 1e12) {
                    snprintf(buf, sizeof(buf), "%6.2f TFLOP", flops / 1e12);
                } else if (flops >= 1e9) {
                    snprintf(buf, sizeof(buf), "%6.2f GFLOP", flops / 1e9);
                } else if (flops >= 1e6) {
                    snprintf(buf, sizeof(buf), "%6.2f MFLOP", flops / 1e6);
                } else {
                    snprintf(buf, sizeof(buf), "%6.2f KFLOP", flops / 1e3);
                }
                return buf;
            };
            printf("%s/run - \033[1;34m%sS\033[0m",
                format_flops(op_flops(out)).c_str(),
                format_flops(flops_per_sec).c_str());
        } else {
            printf("%8zu kB/run - \033[1;34m%7.2f GB/s\033[0m",
                op_size(out) / 1024,
                total_mem / (total_time_us / 1e6) / 1024.0 / 1024.0 / 1024.0);
        }
        printf("\n");

        return true;
    }
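
    // Note on the methodology above: rather than timing a single node, eval_perf() duplicates the op
    // until a per-run work target is reached (8/100 GFLOP or 8/32 GB of memory traffic for CPU/GPU),
    // which amortizes graph-launch overhead, then repeats whole-graph computations for at least one
    // second. FLOPS is reported when op_flops() returns a nonzero value, bandwidth (GB/s) otherwise.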

    bool eval_grad(ggml_backend_t backend, const char * op_name) {
        mode = MODE_GRAD;
        const std::vector<float> expect = grad_expect();

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + 2*ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, true),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context_ptr ctx(ggml_init(params)); // smart ptr
        GGML_ASSERT(ctx);

        gf = ggml_new_graph_custom(ctx.get(), GGML_DEFAULT_GRAPH_SIZE, true);
        gb = ggml_new_graph_custom(ctx.get(), GGML_DEFAULT_GRAPH_SIZE, true);

        ggml_tensor * out = build_graph(ctx.get());
        if ((op_name != nullptr && op_desc(out) != op_name) || out->op == GGML_OP_OPT_STEP_ADAMW) {
            //printf(" %s: skipping\n", op_desc(out).c_str());
            return true;
        }

        printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        if (out->type != GGML_TYPE_F32) {
            printf("not supported [%s->type != FP32]\n", out->name);
            return true;
        }

        // check if the backend supports the ops
        bool supported = true;
        bool any_params = false;
        for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != NULL; t = ggml_get_next_tensor(ctx.get(), t)) {
            if (!ggml_backend_supports_op(backend, t)) {
                printf("not supported [%s] ", ggml_backend_name(backend));
                supported = false;
                break;
            }
            if ((t->flags & GGML_TENSOR_FLAG_PARAM)) {
                any_params = true;
                if (t->type != GGML_TYPE_F32) {
                    printf("not supported [%s->type != FP32] ", t->name);
                    supported = false;
                    break;
                }
            }
        }
        if (!any_params) {
            printf("not supported [%s] \n", op_desc(out).c_str());
            supported = false;
        }
        if (!supported) {
            printf("\n");
            return true;
        }

        int64_t ngrads = 0;
        for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != NULL; t = ggml_get_next_tensor(ctx.get(), t)) {
            if (t->flags & GGML_TENSOR_FLAG_PARAM) {
                ngrads += ggml_nelements(t);
            }
        }
        if (ngrads > grad_nmax()) {
            printf("skipping large tensors for speed \n");
            return true;
        }

        if (!ggml_is_scalar(out)) {
            out = ggml_sum(ctx.get(), out);
            ggml_set_name(out, "sum_of_out");
        }
        ggml_set_loss(out);

        ggml_build_forward_expand(gf, out);
        ggml_graph_cpy(gf, gb);
        ggml_build_backward_expand(ctx.get(), gb, nullptr);
        if (expect.size() != 1 || expect[0] != 0.0f) {
            GGML_ASSERT(ggml_graph_n_nodes(gb) > ggml_graph_n_nodes(gf));
            for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != NULL; t = ggml_get_next_tensor(ctx.get(), t)) {
                GGML_ASSERT(!(t->flags & GGML_TENSOR_FLAG_PARAM) || ggml_graph_get_grad(gb, t)->op != GGML_OP_NONE);
            }
        }

        for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != NULL; t = ggml_get_next_tensor(ctx.get(), t)) {
            if (!ggml_backend_supports_op(backend, t)) {
                printf("not supported [%s] ", ggml_backend_name(backend));
                supported = false;
                break;
            }
            if ((t->flags & GGML_TENSOR_FLAG_PARAM) && t->type != GGML_TYPE_F32) {
                printf("not supported [%s->type != FP32] ", t->name);
                supported = false;
                break;
            }
        }
        if (!supported) {
            printf("\n");
            return true;
        }

        // allocate
        ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); // smart ptr
        if (buf == NULL) {
            printf("failed to allocate tensors [%s] ", ggml_backend_name(backend));
            return false;
        }

        initialize_tensors(ctx.get()); // Randomizes all tensors (including gradients).
        ggml_graph_reset(gb);          // Sets gradients to 1 if loss, 0 otherwise.

        ggml_status status = ggml_backend_graph_compute(backend, gf);
        if (status != GGML_STATUS_SUCCESS) {
            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
            return false;
        }
        status = ggml_backend_graph_compute(backend, gb);
        if (status != GGML_STATUS_SUCCESS) {
            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
            return false;
        }

        bool ok = true;
        for (struct ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != nullptr; t = ggml_get_next_tensor(ctx.get(), t)) {
            if (!(t->flags & GGML_TENSOR_FLAG_PARAM)) {
                continue;
            }

            const char * bn = ggml_backend_name(backend);
            const int64_t ne = ggml_nelements(t);

            std::vector<float> ga; // gradient, algebraic (from backpropagation)
            struct ggml_tensor * grad = ggml_graph_get_grad(gb, t);
            if (grad) {
                ga = tensor_to_float(grad);
            } else {
                ga.resize(ne); // default value is 0.0f
            }

            for (int64_t i = 0; i < ne; ++i) {
                // check for non-finite values (NaN or inf)
                if (!std::isfinite(ga[i])) {
                    printf("[%s] nonfinite gradient at index %" PRId64 " (%s=%f) ", ggml_op_desc(t), i, bn, ga[i]);
                    ok = false;
                    break;
                }
            }
            if (!ok) {
                break;
            }

            std::vector<float> gn(ne); // gradient, numeric (from finite differences)
            GGML_ASSERT(ga.size() == gn.size());

            std::vector<float> x0 = tensor_to_float(t); // original t data
            GGML_ASSERT(ggml_is_scalar(out));
            GGML_ASSERT(out->type == GGML_TYPE_F32);

            const float eps = grad_eps();
            for (int64_t i = 0; i < ne; ++i) {
                const float xiu  = x0[i] + 1.0f*eps; // x, index i, up
                const float xiuh = x0[i] + 0.5f*eps; // x, index i, up half
                const float xidh = x0[i] - 0.5f*eps; // x, index i, down half
                const float xid  = x0[i] - 1.0f*eps; // x, index i, down
                float fu, fuh, fdh, fd; // output values for xiu, xiuh, xidh, xid

                ggml_backend_tensor_set(t, &xiu, i*sizeof(float), sizeof(float));
                status = ggml_backend_graph_compute(backend, gf);
                if (status != GGML_STATUS_SUCCESS) {
                    fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
                    return false;
                }
                ggml_backend_tensor_get(out, &fu, 0, ggml_nbytes(out));

                ggml_backend_tensor_set(t, &xid, i*sizeof(float), sizeof(float));
                status = ggml_backend_graph_compute(backend, gf);
                if (status != GGML_STATUS_SUCCESS) {
                    fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
                    return false;
                }
                ggml_backend_tensor_get(out, &fd, 0, ggml_nbytes(out));

                if (grad_precise()) {
                    ggml_backend_tensor_set(t, &xiuh, i*sizeof(float), sizeof(float));
                    status = ggml_backend_graph_compute(backend, gf);
                    if (status != GGML_STATUS_SUCCESS) {
                        fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
                        return false;
                    }
                    ggml_backend_tensor_get(out, &fuh, 0, ggml_nbytes(out));

                    ggml_backend_tensor_set(t, &xidh, i*sizeof(float), sizeof(float));
                    status = ggml_backend_graph_compute(backend, gf);
                    if (status != GGML_STATUS_SUCCESS) {
                        fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
                        return false;
                    }
                    ggml_backend_tensor_get(out, &fdh, 0, ggml_nbytes(out));

                    gn[i] = (8.0*(double)fuh + (double)fd - (8.0*(double)fdh + (double)fu)) / (6.0*(double)eps);
                } else {
                    gn[i] = (fu - fd) / (2.0f*eps);
                }

                ggml_backend_tensor_set(t, x0.data(), 0, ggml_nbytes(t));
            }

            const double err = mean_abs_asymm(gn.data(), ga.data(), gn.size(), expect);
            if (err > max_maa_err()) {
                printf("[%s] MAA = %.9f > %.9f ", ggml_op_desc(t), err, max_maa_err());
                ok = false;
                break;
            }
            if (!ok) {
                break;
            }
        }

        if (!ok) {
            printf("compare failed ");
        }

        if (ok) {
            printf("\033[1;32mOK\033[0m\n");
            return true;
        }
        printf("\033[1;31mFAIL\033[0m\n");
        return false;
    }
};

// ####################################
// ## Section 2: GGML Op Definitions ##
// ####################################

// The following is an example showing the bare minimum for creating a test for a GGML op.

// GGML_OP_EXAMPLE
struct test_example : public test_case {
    // Always define these 2 or variants thereof:
    const ggml_type type;            // The type of the input tensors.
    const std::array<int64_t, 4> ne; // The shape of the input tensors.
    // For some ops it's necessary to define multiple types or shapes for the inputs.
    // Or they may need additional parameters.

    // Put all parameters needed to fully define the test into one of the VARS_TO_STR macros.
    // In most cases these are just the properties of the struct that you defined above.
    // This is needed for info prints.
    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    // Define a constructor for the struct.
    // In most cases it will be sufficient to have the same arguments as the struct has properties
    // and just use initializer lists.
    test_example(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    // Define how a simple GGML compute graph can be constructed for the new GGML op.
    ggml_tensor * build_graph(ggml_context * ctx) override {
        // Step 1: create input tensors that don't depend on any other tensors:
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a"); // Setting names is optional but it's useful for debugging.

        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(b, "b");

        // Step 2: use the op that you want to test in the GGML compute graph.
        ggml_tensor * out = ggml_add(ctx, a, b); // For this example we're just doing a simple addition.
        ggml_set_name(out, "out");

        // Step 3: return the output tensor.
        return out;
    }
    // In order to also check the gradients for your op, add calls like ggml_set_param(a)
    // immediately after you create the tensors.
    // This is optional and only makes sense if a backward pass has actually been implemented for the new op.
};
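
// To actually run test_example it would still have to be instantiated in section 3 (outside this
// excerpt); judging by the pattern used for the other ops, that is a one-liner along the lines of:
//   test_cases.emplace_back(new test_example(GGML_TYPE_F32, {10, 5, 4, 3}));
// (sketch only; see section 3 for the actual container and registration point).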
// GGML_OP_UNARY
struct test_unary : public test_case {
    const ggml_unary_op op;
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int v; // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, v);
    }

    test_unary(ggml_unary_op op,
            ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {128, 2, 2, 2},
            int v = 0)
        : op(op), type(type), ne_a(ne_a), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        const bool grad_supported = op == GGML_UNARY_OP_ABS || op == GGML_UNARY_OP_SGN || op == GGML_UNARY_OP_NEG ||
            op == GGML_UNARY_OP_STEP || op == GGML_UNARY_OP_RELU || op == GGML_UNARY_OP_SILU;

        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            if (grad_supported) {
                ggml_set_param(a);
            }
            ggml_set_name(a, "a");

            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
            ggml_set_name(a, "view_of_a");
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
            if (grad_supported) {
                ggml_set_param(a);
            }
            ggml_set_name(a, "a");
        }

        ggml_tensor * out = ggml_unary(ctx, a, op);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            // test extended range of values to check for NaNs in GELU
            init_tensor_uniform(t, -150.f, 150.f);
        }
    }

    float grad_eps() override {
        return 15.0f;
    }

    std::vector<float> grad_expect() override {
        if (op == GGML_UNARY_OP_ABS) {
            return {-1.0f, 1.0f};
        }
        if (op == GGML_UNARY_OP_SGN || op == GGML_UNARY_OP_STEP) {
            return {0.0f};
        }
        if (op == GGML_UNARY_OP_RELU) {
            return {0.0f, 1.0f};
        }
        return {};
    }
};

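// Note (descriptive comment added for this listing): grad_expect() enumerates the values the
// analytic gradient can take for ops whose derivative is piecewise constant. For example,
// d/dx max(0, x) is 0 for x < 0 and 1 for x > 0, so for RELU the gradient check can compare
// against the finite set {0, 1} rather than relying on a finite-difference estimate alone.
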
// GGML_OP_GLU
struct test_glu : public test_case {
    const ggml_glu_op op;
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int v; // view (1 : non-contiguous a)
    bool swapped;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, v, swapped);
    }

    test_glu(ggml_glu_op op,
            ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {128, 2, 2, 2},
            int v = 0,
            bool swapped = false)
        : op(op), type(type), ne_a(ne_a), v(v), swapped(swapped) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            ggml_set_name(a, "a");

            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
            ggml_set_name(a, "view_of_a");
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
            ggml_set_name(a, "a");
        }

        ggml_tensor * out = ggml_glu(ctx, a, op, swapped);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            // test extended range of values to check for NaNs in GELU
            init_tensor_uniform(t, -150.f, 150.f);
        }
    }
};

struct test_glu_split : public test_case {
    const ggml_glu_op op;
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int v; // view (1 : non-contiguous a)

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, v) + ",split";
    }

    test_glu_split(ggml_glu_op op,
            ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {128, 2, 2, 2},
            int v = 0)
        : op(op), type(type), ne_a(ne_a), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        ggml_tensor * b;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            ggml_set_name(a, "a");

            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
            ggml_set_name(a, "view_of_a");

            b = ggml_new_tensor(ctx, type, 4, ne.data());
            ggml_set_name(b, "b");

            b = ggml_view_4d(ctx, b, ne_a[0], ne_a[1], ne_a[2], ne_a[3], b->nb[1], b->nb[2], b->nb[3], 0);
            ggml_set_name(b, "view_of_b");
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
            ggml_set_name(a, "a");

            b = ggml_new_tensor(ctx, type, 4, ne_a.data());
            ggml_set_name(b, "b");
        }

        ggml_tensor * out = ggml_glu_split(ctx, a, b, op);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            // test extended range of values to check for NaNs in GELU
            init_tensor_uniform(t, -150.f, 150.f);
        }
    }
};

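// Note (descriptive comment added for this listing; see ggml.h for the authoritative op
// documentation): ggml_glu(a, op, swapped) gates one half of a against the other along dim 0,
// whereas ggml_glu_split(a, b, op) takes the gate as a separate tensor b - hence the ",split"
// suffix in vars() and the second input built above.
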
// GGML_OP_GET_ROWS
struct test_get_rows : public test_case {
    const ggml_type type;
    const int n; // cols
    const int m; // rows
    const int r; // rows to get
    const int b; // batch size
    const bool v; // view (non-contiguous src1)

    std::string vars() override {
        return VARS_TO_STR6(type, n, m, r, b, v);
    }

    test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
        : type(type), n(n), m(m), r(r), b(b), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * in = ggml_new_tensor_3d(ctx, type, n, m, b);
        ggml_set_name(in, "in");

        ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
        ggml_set_name(rows, "rows");
        if (v) {
            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
            ggml_set_name(rows, "view_of_rows");
        }

        const bool grad_supported = ggml_is_matrix(in) && ggml_is_vector(rows);
        if (grad_supported) {
            ggml_set_param(in);
            // rows is a constant input -> no gradients
        }

        ggml_tensor * out = ggml_get_rows(ctx, in, rows);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // rows
                std::vector<int> data(r*b);
                for (int i = 0; i < r*b; i++) {
                    data[i] = rand() % m;
                }
                ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

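// Worked example (comment added for this listing): with the defaults n=10, m=5, r=3, b=1 the
// input is a 10x5 matrix, "rows" holds 3 indices drawn uniformly from [0, 5), and the output
// has shape (10, 3, 1) - one copy of each selected row per batch.
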
// GGML_OP_GET_ROWS_BACK
struct test_get_rows_back : public test_case {
    const ggml_type type;
    const int n; // cols
    const int m; // rows
    const int r; // rows to get
    const int b; // batch size
    const bool v; // view (non-contiguous src1)

    std::string vars() override {
        return VARS_TO_STR6(type, n, m, r, b, v);
    }

    test_get_rows_back(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
        : type(type), n(n), m(m), r(r), b(b), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * in_forward = ggml_new_tensor_3d(ctx, type, n, m, b);
        ggml_set_name(in_forward, "in_forward");

        ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
        ggml_set_name(rows, "rows");
        if (v) {
            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
            ggml_set_name(rows, "view_of_rows");
        }

        ggml_tensor * grad = ggml_new_tensor_3d(ctx, type, n, r, b);
        ggml_set_name(grad, "grad");

        ggml_tensor * out = ggml_get_rows_back(ctx, grad, rows, in_forward);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // rows
                std::vector<int> data(r*b);
                for (int i = 0; i < r*b; i++) {
                    data[i] = rand() % m;
                }
                ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_SET_ROWS
struct test_set_rows : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 2> nr23; // broadcast only dims 2 and 3
    const int r; // rows to set
    const bool v; // view (non-contiguous src1)

    std::string vars() override {
        return VARS_TO_STR5(type, ne, nr23, r, v);
    }

    test_set_rows(ggml_type type,
            std::array<int64_t, 4> ne,
            std::array<int, 2> nr23,
            int r, bool v = false)
        : type(type), ne(ne), nr23(nr23), r(r), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * dst = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2]*nr23[0], ne[3]*nr23[1]);
        ggml_set_name(dst, "dst");

        ggml_tensor * src = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, ne[0], r, ne[2]*nr23[0], ne[3]*nr23[1]);
        ggml_set_name(src, "src");

        ggml_tensor * row_idxs = ggml_new_tensor_3d(ctx, GGML_TYPE_I64, r, ne[2], ne[3]);
        ggml_set_name(row_idxs, "row_idxs");

        if (v) {
            src = ggml_view_4d(ctx, src, ne[0], r/2, ne[2]*nr23[0], ne[3]*nr23[1], src->nb[1], src->nb[2], src->nb[3], 0);
            row_idxs = ggml_view_3d(ctx, row_idxs, r/2, ne[2], ne[3], row_idxs->nb[1], row_idxs->nb[2], 0);
            ggml_set_name(row_idxs, "view_of_rows");
        }

        ggml_tensor * out = ggml_set_rows(ctx, dst, src, row_idxs);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I64) {
                if (ggml_is_view_op(t->op)) {
                    continue;
                }

                for (int i2 = 0; i2 < t->ne[2]; i2++) {
                    for (int i1 = 0; i1 < t->ne[1]; i1++) {
                        // generate a shuffled subset of row indices
                        std::vector<int64_t> data(ne[1]);
                        for (int i = 0; i < ne[1]; i++) {
                            data[i] = i;
                        }
                        std::shuffle(data.begin(), data.end(), rng);
                        data.resize(t->ne[0]);

                        const size_t offs = i1*t->nb[1] + i2*t->nb[2];
                        ggml_backend_tensor_set(t, data.data(), offs, t->ne[0]*sizeof(int64_t));
                    }
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

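// Why shuffle instead of rand() % ne[1] (comment added for this listing): drawing the indices
// without replacement guarantees that the same destination row is never written twice, so the
// result of SET_ROWS does not depend on the order in which a backend processes the rows.
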
// GGML_OP_ARGMAX
struct test_argmax : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_argmax(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 100, 1, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_argmax(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_F32) {
                // initialize with unique values to avoid ties
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<float> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }

    double max_nmse_err() override {
        return 0.0;
    }
};

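// Rationale (comment added for this listing): with ties two backends could legitimately return
// different indices for the same row, so each row is filled with a shuffled permutation of
// 0..ne[0]-1, giving a unique maximum; max_nmse_err() == 0.0 then demands exact index agreement.
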
// GGML_OP_COUNT_EQUAL
struct test_count_equal : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_count_equal(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {4, 500, 1, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * a_argmax = ggml_argmax(ctx, a);
        ggml_set_name(a_argmax, "a_argmax");

        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(b, "b");

        ggml_tensor * b_argmax = ggml_argmax(ctx, b);
        ggml_set_name(b_argmax, "b_argmax");

        ggml_tensor * out = ggml_count_equal(ctx, a_argmax, b_argmax);
        ggml_set_name(out, "out");

        return out;
    }

    double max_nmse_err() override {
        return 0.0;
    }
};

// GGML_OP_REPEAT
struct test_repeat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 2;
    }

    test_repeat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3},
            std::array<int, 4> nr = {2, 2, 2, 2})
        : type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * target = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_set_name(target, "target");

        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(src);
        ggml_set_name(src, "src");

        ggml_tensor * out = ggml_repeat(ctx, src, target);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_REPEAT_BACK
struct test_repeat_back : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;
    const bool v; // whether src is a noncontiguous view

    std::string vars() override {
        return VARS_TO_STR4(type, ne, nr, v);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 2;
    }

    test_repeat_back(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {8, 6, 4, 2},
            std::array<int, 4> nr = {2, 2, 2, 2},
            bool v = false)
        : type(type), ne(ne), nr(nr), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_set_name(src, "src");

        if (v) {
            GGML_ASSERT(ne[0] % 2 == 0);
            GGML_ASSERT(ne[1] % 2 == 0);
            GGML_ASSERT(ne[2] % 2 == 0);
            GGML_ASSERT(ne[3] % 2 == 0);
            GGML_ASSERT(nr[0] % 2 == 0 || nr[0] == 1);
            GGML_ASSERT(nr[1] % 2 == 0 || nr[1] == 1);
            GGML_ASSERT(nr[2] % 2 == 0 || nr[2] == 1);
            GGML_ASSERT(nr[3] % 2 == 0 || nr[3] == 1);

            const int64_t ne00 = nr[0] == 1 ? src->ne[0] : src->ne[0] / 2;
            const int64_t ne01 = nr[1] == 1 ? src->ne[1] : src->ne[1] / 2;
            const int64_t ne02 = nr[2] == 1 ? src->ne[2] : src->ne[2] / 2;
            const int64_t ne03 = nr[3] == 1 ? src->ne[3] : src->ne[3] / 2;

            src = ggml_view_4d(ctx, src, ne00, ne01, ne02, ne03, src->nb[1], src->nb[2], src->nb[3], 0);
        }

        ggml_tensor * target = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(target, "target");

        ggml_tensor * out = ggml_repeat_back(ctx, src, target);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_DUP
struct test_dup : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> permute;
    bool _use_permute;

    std::string vars() override {
        std::string v = VARS_TO_STR2(type, ne);
        if (_use_permute) v += "," + VAR_TO_STR(permute);
        return v;
    }

    test_dup(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 20, 1},
            std::array<int64_t, 4> permute = {0, 0, 0, 0})
        : type(type), ne(ne), permute(permute),
            _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(src);
        ggml_set_name(src, "src");

        if (_use_permute) {
            src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
            ggml_set_name(src, "src_permuted");
        }

        ggml_tensor * out = ggml_dup(ctx, src);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_SET
struct test_set : public test_case {
    const ggml_type type_src;
    const ggml_type type_dst;
    const std::array<int64_t, 4> ne;
    const int dim;

    std::string vars() override {
        return VARS_TO_STR4(type_src, type_dst, ne, dim);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
    }

    test_set(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {6, 5, 4, 3}, int dim = 1)
        : type_src(type_src), type_dst(type_dst), ne(ne), dim(dim) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
        ggml_set_param(src);
        ggml_set_name(src, "src");

        auto ne_dst = ne;
        for (int i = 0; i < dim; ++i) {
            ne_dst[i] *= 2;
        }
        ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, ne_dst.data());
        ggml_set_param(dst);
        ggml_set_name(dst, "dst");

        size_t offset = 0;
        for (int i = 0; i < dim; ++i) {
            offset += ((ne_dst[i] - ne[i])/2)*dst->nb[i];
        }
        ggml_tensor * out = ggml_set(ctx, dst, src,
            // The backward pass requires setting a contiguous region:
            src->nb[1], src->nb[2], src->nb[3], offset);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_CPY
struct test_cpy : public test_case {
    const ggml_type type_src;
    const ggml_type type_dst;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> permute_src;
    const std::array<int64_t, 4> permute_dst;
    bool _src_use_permute;
    bool _dst_use_permute;

    std::string vars() override {
        return VARS_TO_STR5(type_src, type_dst, ne, permute_src, permute_dst);
    }

    double max_nmse_err() override {
        return 1e-6;
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
    }

    test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1},
            std::array<int64_t, 4> permute_src = {0, 0, 0, 0},
            std::array<int64_t, 4> permute_dst = {0, 0, 0, 0})
        : type_src(type_src), type_dst(type_dst), ne(ne), permute_src(permute_src), permute_dst(permute_dst),
            _src_use_permute(permute_src[0] + permute_src[1] + permute_src[2] + permute_src[3] > 0),
            _dst_use_permute(permute_dst[0] + permute_dst[1] + permute_dst[2] + permute_dst[3] > 0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
        ggml_set_param(src);
        ggml_set_name(src, "src");

        if (_src_use_permute) {
            src = ggml_permute(ctx, src, permute_src[0], permute_src[1], permute_src[2], permute_src[3]);
            ggml_set_name(src, "src_permuted");
        }

        ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, src->ne);
        ggml_set_name(dst, "dst");

        if (_dst_use_permute) {
            dst = ggml_permute(ctx, dst, permute_dst[0], permute_dst[1], permute_dst[2], permute_dst[3]);
            ggml_set_name(dst, "dst_permuted");
        }

        ggml_tensor * out = ggml_cpy(ctx, src, dst);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_CONT
struct test_cont : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cont(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(src);
        ggml_set_name(src, "src");

        src = ggml_transpose(ctx, src);
        ggml_set_name(src, "src_transposed");

        ggml_tensor * out = ggml_cont(ctx, src);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_ADD
// GGML_OP_SUB
// GGML_OP_MUL
// GGML_OP_DIV
struct test_bin_bcast : public test_case {
    using op_t = ggml_tensor * (*) (ggml_context *, ggml_tensor *, ggml_tensor *);
    op_t op;
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 3;
    }

    test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 1, 1},
            std::array<int, 4> nr = {1, 2, 1, 1})
        : op(op), type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_set_name(a, "a");

        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(b, "b");

        // The backward pass supports broadcasting only for GGML_ADD:
        const bool grad_supported = op == ggml_add || ggml_are_same_shape(a, b);
        if (grad_supported) {
            ggml_set_param(a);
            ggml_set_param(b);
        }

        ggml_tensor * out = op(ctx, a, b);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (op == ggml_mul || op == ggml_div) {
                // MUL and DIV have numerical issues around zero:
                init_tensor_uniform(t, 0.9f, 1.1f);
            } else {
                init_tensor_uniform(t);
            }
        }
    }

    float grad_eps() override {
        return 0.1f * (op == ggml_mul ? ne[0]*ne[1]*ne[2]*ne[3] : 1);
    }

    bool grad_precise() override {
        return op == ggml_div;
    }

    double max_maa_err() override {
        return op == ggml_add ? 1e-4 : 1e-3;
    }
};

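// Illustrative sketch (arguments invented for the example): because the op is passed in as a
// plain function pointer, this one struct covers all four binary broadcasting ops, e.g.:
//
//     test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {10, 10, 1, 1}, {1, 2, 1, 1}));
//     test_cases.emplace_back(new test_bin_bcast(ggml_div, GGML_TYPE_F32, {10, 10, 1, 1}, {1, 1, 1, 1}));
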
// GGML_OP_ADD1
struct test_add1 : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_add1(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * b = ggml_new_tensor_1d(ctx, type, 1);
        // ggml_set_param(b); // TODO: implement
        ggml_set_name(b, "b");

        ggml_tensor * out = ggml_add1(ctx, a, b);
        ggml_set_name(out, "out");

        return out;
    }

    float grad_eps() override {
        return 0.1f * ne[0]*ne[1]*ne[2]*ne[3];
    }
};

// GGML_OP_SCALE
struct test_scale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float scale;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, scale);
    }

    test_scale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float scale = 2.0f)
        : type(type), ne(ne), scale(scale) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_scale(ctx, a, scale);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_SILU_BACK
struct test_silu_back : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_silu_back(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 5, 4, 3},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * grad = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(grad, "grad");

        ggml_tensor * out = ggml_silu_back(ctx, a, grad);
        ggml_set_name(out, "out");

        return out;
    }

    bool grad_precise() override {
        return true;
    }
};

// GGML_OP_NORM
struct test_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const bool v; // whether a is a non-contiguous view
    const float eps;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, v, eps);
    }

    test_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 5, 4, 3},
            bool v = false,
            float eps = 1e-6f)
        : type(type), ne(ne), v(v), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        if (v) {
            a = ggml_view_4d(ctx, a, a->ne[0]/2, a->ne[1]/2, a->ne[2]/2, a->ne[3]/2, a->nb[1], a->nb[2], a->nb[3], 0);
            ggml_set_name(a, "view of a");
        }

        ggml_tensor * out = ggml_norm(ctx, a, eps);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_RMS_NORM
struct test_rms_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const bool v; // whether a is a non-contiguous view
    const float eps;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, v, eps);
    }

    test_rms_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 5, 4, 3},
            bool v = false,
            float eps = 1e-6f)
        : type(type), ne(ne), v(v), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        if (v) {
            a = ggml_view_4d(ctx, a, a->ne[0]/2, a->ne[1]/2, a->ne[2]/2, a->ne[3]/2, a->nb[1], a->nb[2], a->nb[3], 0);
            ggml_set_name(a, "view of a");
        }

        ggml_tensor * out = ggml_rms_norm(ctx, a, eps);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -10.f, 10.f);
        }
    }

    float grad_eps() override {
        return 1.0f;
    }

    bool grad_precise() override {
        return true;
    }
};

// GGML_OP_RMS_NORM_BACK
struct test_rms_norm_back : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_rms_norm_back(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 5, 4, 3},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(b, "b");

        ggml_tensor * out = ggml_rms_norm_back(ctx, a, b, eps);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -10.f, 10.f);
        }
    }
};

// GGML_OP_RMS_NORM + GGML_OP_MUL
struct test_rms_norm_mul : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const float eps;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "RMS_NORM_MUL";
    }

    bool run_whole_graph() override { return true; }

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_rms_norm_mul(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 5, 4, 3},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");
        ggml_set_param(b);
        ggml_set_name(b, "b");

        // Use a and b early, so we don't end up with an OP_NONE between rms_norm and mul
        a = ggml_add(ctx, a, b);
        ggml_tensor * out = ggml_mul(ctx, ggml_rms_norm(ctx, a, eps), b);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -10.f, 10.f);
        }
    }

    double max_nmse_err() override {
        return 1e-6;
    }

    float grad_eps() override {
        return 1.0f;
    }

    bool grad_precise() override {
        return true;
    }
};

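// Why this combined test exists (comment added for this listing): some backends fuse an
// RMS_NORM that is immediately consumed by a MUL into a single kernel. run_whole_graph()
// returning true makes the harness execute and compare the full graph instead of isolating
// individual ops, so such a fusion path actually gets exercised.
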
// GGML_OP_SSM_CONV
struct test_ssm_conv : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const std::array<int64_t, 4> ne_b;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, ne_b);
    }

    test_ssm_conv(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 1},
            std::array<int64_t, 4> ne_b = {3, 3, 1, 1})
        : type(type), ne_a(ne_a), ne_b(ne_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        ggml_tensor * out = ggml_ssm_conv(ctx, a, b);
        return out;
    }
};

// GGML_OP_SSM_SCAN
struct test_ssm_scan : public test_case {
    const ggml_type type;
    const int64_t d_state;
    const int64_t head_dim;
    const int64_t n_head;
    const int64_t n_group;
    const int64_t n_seq_tokens;
    const int64_t n_seqs;

    std::string vars() override {
        return VARS_TO_STR7(type, d_state, head_dim, n_head, n_group, n_seq_tokens, n_seqs);
    }

    test_ssm_scan(ggml_type type = GGML_TYPE_F32,
            int64_t d_state = 32,
            int64_t head_dim = 1, // > 1 for Mamba-2
            int64_t n_head = 32,
            int64_t n_group = 1,
            int64_t n_seq_tokens = 32,
            int64_t n_seqs = 32)
        : type(type), d_state(d_state), head_dim(head_dim), n_head(n_head), n_group(n_group), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * s   = ggml_new_tensor_4d(ctx, type, d_state, head_dim, n_head, n_seqs);
        ggml_tensor * x   = ggml_new_tensor_4d(ctx, type, head_dim, n_head, n_seq_tokens, n_seqs);
        ggml_tensor * dt  = ggml_new_tensor_3d(ctx, type, n_head, n_seq_tokens, n_seqs);
        ggml_tensor * A   = ggml_new_tensor_2d(ctx, type, (head_dim > 1) ? 1 : d_state, n_head);
        ggml_tensor * B   = ggml_new_tensor_4d(ctx, type, d_state, n_group, n_seq_tokens, n_seqs);
        ggml_tensor * C   = ggml_new_tensor_4d(ctx, type, d_state, n_group, n_seq_tokens, n_seqs);
        ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_seqs);
        ggml_tensor * out = ggml_ssm_scan(ctx, s, x, dt, A, B, C, ids);
        return out;
    }

    // similar to test_mul_mat_id
    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // ids
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<int32_t> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(int32_t));
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

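// Note on ids (comment added for this listing): the I32 "ids" tensor is filled with a shuffled
// permutation of 0..n_seqs-1, i.e. every sequence's recurrent state is used exactly once but in
// a random order, which checks that the op looks states up through ids rather than positionally.
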
// GGML_OP_RWKV_WKV6
struct test_rwkv_wkv6 : public test_case {
    const ggml_type type;
    const int64_t head_count;
    const int64_t head_size;
    const int64_t n_seq_tokens;
    const int64_t n_seqs;

    std::string vars() override {
        return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs);
    }

    test_rwkv_wkv6(ggml_type type = GGML_TYPE_F32,
            int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
        : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        const int64_t n_tokens = n_seq_tokens * n_seqs;
        ggml_tensor * r  = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * k  = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * v  = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * tf = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size, head_count }.data());
        ggml_tensor * td = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * s  = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size * head_size * head_count, n_seqs }.data());
        ggml_tensor * out = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, s);
        return out;
    }
};

// GGML_OP_GATED_LINEAR_ATTN
struct test_gla : public test_case {
    const ggml_type type;
    const int64_t head_count;
    const int64_t head_size;
    const int64_t n_seq_tokens;
    const int64_t n_seqs;

    std::string vars() override {
        return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs);
    }

    test_gla(ggml_type type = GGML_TYPE_F32,
            int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
        : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        const int64_t n_tokens = n_seq_tokens * n_seqs;
        ggml_tensor * q = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * g = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size * head_size * head_count, n_seqs }.data());
        ggml_tensor * out = ggml_gated_linear_attn(ctx, k, v, q, g, s, pow(head_size, -0.5));
        return out;
    }
};

// GGML_OP_RWKV_WKV7
struct test_rwkv_wkv7 : public test_case {
    const ggml_type type;
    const int64_t head_count;
    const int64_t head_size;
    const int64_t n_seq_tokens;
    const int64_t n_seqs;

    std::string vars() override {
        return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs);
    }

    test_rwkv_wkv7(ggml_type type = GGML_TYPE_F32,
            int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
        : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        const int64_t n_tokens = n_seq_tokens * n_seqs;
        ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * w = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * a = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
        // Without this normalization the outputs may become NaN at long sequence lengths:
        a = ggml_l2_norm(ctx, a, 1e-7F);
        b = ggml_l2_norm(ctx, b, 1e-7F);
        ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size * head_size * head_count, n_seqs }.data());
        ggml_tensor * out = ggml_rwkv_wkv7(ctx, r, w, k, v, a, b, s);
        return out;
    }
};

// GGML_OP_MUL_MAT
struct test_mul_mat : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int64_t m;
    const int64_t n;
    const int64_t k;
    const std::array<int64_t, 2> bs;  // dims 3 and 4
    const std::array<int64_t, 2> nr;  // repeat in dims 3 and 4
    const std::array<int64_t, 4> per; // permutation of dimensions
    const bool v; // whether a and b are non-contiguous views

    std::string vars() override {
        return VARS_TO_STR9(type_a, type_b, m, n, k, bs, nr, per, v);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    int64_t grad_nmax() override {
        return 20000;
    }

    uint64_t op_flops(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return 2 * m * n * k * bs[0] * nr[0] * bs[1] * nr[1];
    }

    test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int64_t m = 32, int64_t n = 32, int64_t k = 32,
            std::array<int64_t, 2> bs = {10, 10},
            std::array<int64_t, 2> nr = {2, 2},
            std::array<int64_t, 4> per = {0, 1, 2, 3},
            bool v = false)
        : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr), per(per), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * a;
        ggml_tensor * b;

        const int npermuted = (per[0] != 0) + (per[1] != 1) + (per[2] != 2) + (per[3] != 3);
        if (npermuted > 0) {
            GGML_ASSERT(npermuted == 2);
            GGML_ASSERT(!v); // not handled
            GGML_ASSERT(!ggml_is_quantized(type_a) || per[0] == 0);
            GGML_ASSERT(!ggml_is_quantized(type_b) || per[0] == 0);

            // Create tensors with the permuted dimensions, then permute them back to the dimensions given by m,n,k.
            const int64_t ne_a[4] = {k, m, bs[0], bs[1]};
            const int64_t ne_b[4] = {k, n, bs[0]*nr[0], bs[1]*nr[1]};

            a = ggml_new_tensor_4d(ctx, type_a, ne_a[per[0]], ne_a[per[1]], ne_a[per[2]], ne_a[per[3]]);
            b = ggml_new_tensor_4d(ctx, type_b, ne_b[per[0]], ne_b[per[1]], ne_b[per[2]], ne_b[per[3]]);
            if (!ggml_is_quantized(type_a)) {
                if (bs[1] == 1 && nr[1] == 1) {
                    ggml_set_param(a);
                }
                ggml_set_param(b);
            }
            ggml_set_name(a, "a");
            ggml_set_name(b, "b");

            a = ggml_permute(ctx, a, per[0], per[1], per[2], per[3]);
            b = ggml_permute(ctx, b, per[0], per[1], per[2], per[3]);
            ggml_set_name(a, "a_permuted");
            ggml_set_name(b, "b_permuted");
        } else {
            if (v) {
                a = ggml_new_tensor_4d(ctx, type_a, k*2, m, bs[0], bs[1]);
                b = ggml_new_tensor_4d(ctx, type_b, k*2, n, bs[0]*nr[0], bs[1]*nr[1]);
                if (!ggml_is_quantized(type_a)) {
                    if (bs[1] == 1 && nr[1] == 1) {
                        ggml_set_param(a);
                    }
                    ggml_set_param(b);
                }
                a = ggml_view_4d(ctx, a, k, m, bs[0], bs[1], a->nb[1], a->nb[2], a->nb[3], 0);
                b = ggml_view_4d(ctx, b, k, n, bs[0]*nr[0], bs[1]*nr[1], b->nb[1], b->nb[2], b->nb[3], 0);
            } else {
                a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0], bs[1]);
                b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]);
                if (!ggml_is_quantized(type_a)) {
                    if (bs[1] == 1 && nr[1] == 1) {
                        ggml_set_param(a);
                    }
                    ggml_set_param(b);
                }
            }
            ggml_set_name(a, "a");
            ggml_set_name(b, "b");
        }

        ggml_tensor * out = ggml_mul_mat(ctx, a, b);
        ggml_set_name(out, "out");

        return out;
    }
};

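// Worked example for op_flops() (comment added for this listing): each output element takes k
// multiply-adds, i.e. 2*k FLOP, and there are m*n outputs per batch. With the defaults
// m = n = k = 32, bs = {10, 10}, nr = {2, 2} this gives
// 2 * 32^3 * (10*2) * (10*2) = 26,214,400 FLOP per MUL_MAT, which perf mode uses to report throughput.
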
// GGML_OP_MUL_MAT_ID
struct test_mul_mat_id : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int n_mats;
    const int n_used;
    const bool b; // broadcast b matrix
    const int64_t m;
    const int64_t n;
    const int64_t k;

    std::string vars() override {
        return VARS_TO_STR8(type_a, type_b, n_mats, n_used, b, m, n, k);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    uint64_t op_flops(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return 2 * m * k * n * n_used;
    }

    test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int n_mats = 8, int n_used = 2, bool b = false,
            int64_t m = 32, int64_t n = 32, int64_t k = 32)
        : type_a(type_a), type_b(type_b), n_mats(n_mats), n_used(n_used), b(b),
            m(m), n(n), k(k) {
        GGML_ASSERT(n_used <= n_mats);
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats);
        ggml_set_name(as, "as");

        ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n);
        ggml_set_name(ids, "ids");
        if (n_used != n_mats) {
            ids = ggml_view_2d(ctx, ids, n_used, n, ids->nb[1], 0);
            ggml_set_name(ids, "view_of_ids");
        }

        ggml_tensor * b = ggml_new_tensor_3d(ctx, type_b, k, this->b ? 1 : n_used, n);
        ggml_set_name(b, "b");

        ggml_tensor * out = ggml_mul_mat_id(ctx, as, b, ids);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // ids
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<int32_t> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i % n_mats;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(int32_t));
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

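// Note on ids initialization (comment added for this listing): each row of "ids" has n_mats
// entries filled with i % n_mats and then shuffled, i.e. it is a permutation of 0..n_mats-1,
// so every index is a valid matrix; the n_used view then selects n_used distinct matrices per
// token, mirroring top-k expert routing in MoE models.
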
// GGML_OP_OUT_PROD
struct test_out_prod : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int64_t m;
    const int64_t n;
    const int64_t k;
    const std::array<int64_t, 2> bs; // dims 3 and 4
    const std::array<int64_t, 2> nr; // repeat in dims 3 and 4
    const bool trans_b;

    std::string vars() override {
        return VARS_TO_STR8(type_a, type_b, m, n, k, bs, nr, trans_b);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    test_out_prod(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int64_t m = 32, int64_t n = 32, int64_t k = 32,
            std::array<int64_t, 2> bs = {10, 10},
            std::array<int64_t, 2> nr = {2, 2},
            bool trans_b = false)
        : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr), trans_b(trans_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type_a, m, k, bs[0], bs[1]);
        ggml_set_name(a, "a");

        ggml_tensor * b;
        if (trans_b) {
            b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]);
            b = ggml_transpose(ctx, b);
        } else {
            b = ggml_new_tensor_4d(ctx, type_b, n, k, bs[0]*nr[0], bs[1]*nr[1]);
        }
        ggml_set_name(b, "b");

        ggml_tensor * out = ggml_out_prod(ctx, a, b);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_SQR
struct test_sqr : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqr(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_sqr(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    float grad_eps() override {
        return 0.1f * 0.25f*ne[0]*ne[1]*ne[2]*ne[3]; // 10% of expected value of sum.
    }
};

// GGML_OP_SQRT
struct test_sqrt : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqrt(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 3, 3, 2})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_sqrt(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        // fill with positive values
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, 50.0f, 100.0f);
        }
    }

    float grad_eps() override {
        return 20.0f;
    }

    bool grad_precise() override {
        return true;
    }
};

// GGML_OP_LOG
struct test_log : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_log(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_log(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            // log(1) == 0, cluster values there to keep the sum low for better precision in the backward pass:
            init_tensor_uniform(t, 0.9f, 1.1f);
        }
    }

    bool grad_precise() override {
        return true;
    }
};

// GGML_OP_SIN
struct test_sin : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sin(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 2, 2, 2})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_sin(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi].
        }
    }

    double max_maa_err() override {
        return 1e-3;
    }

    float grad_eps() override {
        return 0.2f;
    }

    bool grad_precise() override {
        return true;
    }
};

// GGML_OP_COS
struct test_cos : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cos(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 2, 2, 2})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_cos(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi].
        }
    }

    double max_maa_err() override {
        return 1e-3;
    }

    float grad_eps() override {
        return 0.2f;
    }

    bool grad_precise() override {
        return true;
    }
};

// GGML_OP_CLAMP
struct test_clamp : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float min;
    float max;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, min, max);
    }

    test_clamp(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3},
            float min = -0.5f, float max = 0.5f)
        : type(type), ne(ne), min(min), max(max) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_clamp(ctx, a, min, max);
        ggml_set_name(out, "out");

        return out;
    }

    float grad_eps() override {
        return 1e-2f;
    }

    std::vector<float> grad_expect() override {
        return {0.0f, 1.0f};
    }
};

// GGML_OP_DIAG_MASK_INF
struct test_diag_mask_inf : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int n_past;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, n_past);
    }

    test_diag_mask_inf(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 3, 2},
            int n_past = 5)
        : type(type), ne(ne), n_past(n_past) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_diag_mask_inf(ctx, a, n_past);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_SOFT_MAX
struct test_soft_max : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const bool mask;
    const ggml_type m_prec;
    const std::array<int64_t, 2> nr23; // broadcast only dims 2 and 3
    const float scale;
    const float max_bias;

    std::string vars() override {
        return VARS_TO_STR7(type, ne, mask, m_prec, nr23, scale, max_bias);
    }

    // the 1024 test with bias occasionally fails:
    // SOFT_MAX(type=f32,ne=[1024,16,1,1],mask=1,scale=1.000000,max_bias=8.000000): [SOFT_MAX] NMSE = 0.000000103 > 0.000000100 FAIL
    virtual double max_nmse_err() override {
        return 1e-6;
    }

    test_soft_max(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3},
            bool mask = false,
            ggml_type m_prec = GGML_TYPE_F32,
            std::array<int64_t, 2> nr23 = {1, 1},
            float scale = 1.0f,
            float max_bias = 0.0f)
        : type(type), ne(ne), mask(mask), m_prec(m_prec), nr23(nr23), scale(scale), max_bias(max_bias) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2]*nr23[0], ne[3]*nr23[1]);
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * mask = nullptr;
        if (this->mask) {
            mask = ggml_new_tensor_4d(ctx, m_prec, ne[0], ne[1], ne[2], ne[3]);
            ggml_set_name(mask, "mask");
        }

        ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
        ggml_set_name(out, "out");

        return out;
    }

    bool grad_precise() override {
        return true;
    }
};

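// Parameter notes (comment added for this listing; see ggml_soft_max_ext() for the
// authoritative semantics): "scale" multiplies the logits before the softmax and a non-zero
// "max_bias" enables the ALiBi-style positional bias applied via the mask; nr23 makes the
// input larger than the mask in dims 2/3 so the mask broadcasting path is exercised as well.
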
// GGML_OP_SOFT_MAX_BACK
struct test_soft_max_back : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const float scale;
    const float max_bias;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, scale, max_bias);
    }

    test_soft_max_back(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3},
            float scale = 1.0f,
            float max_bias = 0.0f)
        : type(type), ne(ne), scale(scale), max_bias(max_bias) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(b, "b");

        ggml_tensor * out = ggml_soft_max_ext_back(ctx, a, b, scale, max_bias);
        ggml_set_name(out, "out");

        return out;
    }
};

// GGML_OP_ROPE + GGML_OP_ROPE_BACK
struct test_rope : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    int n_dims;
    int mode;
    int n_ctx; // used to generate positions
    float fs;  // freq_scale
    float ef;  // ext_factor
    float af;  // attn_factor
    bool ff;
    int v;     // view (1 : non-contiguous a)
    bool forward;

    std::string vars() override {
        // forward can be inferred from the op, does not need to be printed
        return VARS_TO_STR10(type, ne_a, n_dims, mode, n_ctx, fs, ef, af, ff, v);
    }

    test_rope(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 5, 3, 1},
            int n_dims = 10, int mode = 0, int n_ctx = 512, float fs = 1.0f,
            float ef = 0.0f, float af = 0.0f, bool ff = false, int v = 0, bool forward = true)
        : type(type), ne_a(ne_a), n_dims(n_dims), mode(mode), n_ctx(n_ctx), fs(fs), ef(ef), af(af), ff(ff), v(v), forward(forward) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            if (forward) {
                ggml_set_param(a);
            }
            ggml_set_name(a, "a");

            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
            ggml_set_name(a, "view_of_a");
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
            if (forward) {
                ggml_set_param(a);
            }
            ggml_set_name(a, "a");
        }

        const bool is_mrope  = mode & GGML_ROPE_TYPE_MROPE;
        const bool is_vision = mode == GGML_ROPE_TYPE_VISION;

        ggml_tensor * pos;
        if (is_mrope || is_vision) {
            pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2] * 4);
        } else {
            pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2]);
        }
        ggml_set_name(pos, "pos");

        ggml_tensor * freq = nullptr;
        if (ff) {
            freq = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_dims/2);
            ggml_set_name(freq, "freq");
        }

        ggml_tensor * out;
        if (is_mrope) {
            if (is_vision) {
                GGML_ASSERT(n_dims/4 > 0);
                int rope_sections[4] = {n_dims/4, n_dims/4, 0, 0}; // vision RoPE uses only the first two sections, for the image (x, y) coordinates
                if (forward) {
                    out = ggml_rope_multi     (ctx, a, pos, freq, n_dims/2, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
                } else {
                    out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims/2, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
                }
            } else {
                GGML_ASSERT(n_dims/3 > 0);
                int rope_sections[4] = {n_dims/3, n_dims/3, n_dims/3, 0};
                if (forward) {
                    out = ggml_rope_multi     (ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
                } else {
                    out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
                }
            }
        } else {
            if (forward) {
                out = ggml_rope_ext     (ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
            } else {
                out = ggml_rope_ext_back(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f);
            }

            // TODO: add test with a non-contiguous view as input; this case is needed for build_rope_2d in clip.cpp
        }
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                const int num_pos_ids = (mode & GGML_ROPE_TYPE_MROPE) ? ne_a[2] * 4 : ne_a[2];
                std::vector<int> data(num_pos_ids);
                for (int i = 0; i < num_pos_ids; i++) {
                    data[i] = rand() % n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, num_pos_ids * sizeof(int));
            } else {
                if (t->ne[0] == n_dims/2) {
                    // frequency factors in the range [0.9f, 1.1f]
                    init_tensor_uniform(t, 0.9f, 1.1f);
                } else {
                    init_tensor_uniform(t);
                }
            }
        }
    }

    double max_maa_err() override {
        return 1e-3;
    }

    bool grad_precise() override {
        return true;
    }
};
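// Example instantiations of test_rope (illustrative values): plain RoPE on a
// contiguous input, and the NeoX variant (mode GGML_ROPE_TYPE_NEOX) applied through a
// non-contiguous view (v = 1):
//   test_cases.emplace_back(new test_rope(GGML_TYPE_F32, {128, 32, 2, 1}, 128));
//   test_cases.emplace_back(new test_rope(GGML_TYPE_F32, {128, 32, 2, 1}, 128, GGML_ROPE_TYPE_NEOX, 512, 1.0f, 0.0f, 0.0f, false, 1));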
// GGML_OP_POOL2D
struct test_pool2d : public test_case {
    enum ggml_op_pool pool_type;
    const ggml_type type_input;
    const std::array<int64_t, 4> ne_input;
    // kernel size
    const int k0;
    const int k1;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;

    std::string vars() override {
        return VARS_TO_STR9(pool_type, type_input, ne_input, k0, k1, s0, s1, p0, p1);
    }

    test_pool2d(ggml_op_pool pool_type = GGML_OP_POOL_AVG,
            ggml_type type_input = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
            int k0 = 3, int k1 = 3,
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1)
        : pool_type(pool_type), type_input(type_input), ne_input(ne_input), k0(k0), k1(k1), s0(s0), s1(s1), p0(p0), p1(p1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_set_param(input);
        ggml_set_name(input, "input");

        ggml_tensor * out = ggml_pool_2d(ctx, input, pool_type, k0, k1, s0, s1, p0, p1);
        ggml_set_name(out, "out");

        return out;
    }
};
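// For reference, the pooled output size follows the usual convolution arithmetic
// (pooling has no dilation):
//   OW = (IW + 2*p0 - k0) / s0 + 1,  and likewise for OH with k1/s1/p1.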
// GGML_OP_CONV_TRANSPOSE_1D
struct test_conv_transpose_1d : public test_case {
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    const int s0; // stride
    const int p0; // padding
    const int d0; // dilation

    std::string vars() override {
        return VARS_TO_STR5(ne_input, ne_kernel, s0, p0, d0);
    }

    test_conv_transpose_1d(std::array<int64_t, 4> ne_input = {197, 32, 1, 1},   // [input_width, input_channels, 1 /* assert in cpu kernel */, 1 (should be batch)]
            std::array<int64_t, 4> ne_kernel = {16, 32, 32, 1},                 // [kernel_width, output_channels, input_channels, 1 (should be batch)]
            int s0 = 1, int p0 = 0, int d0 = 1)
        : ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), p0(p0), d0(d0) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data());
        ggml_set_name(input, "input");

        ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data());
        ggml_set_name(kernel, "kernel");

        ggml_tensor * out = ggml_conv_transpose_1d(ctx, kernel, input, s0, p0, d0);
        ggml_set_name(out, "out");

        return out;
    }
};
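// For reference, the transposed convolution produces
//   OW = (IW - 1)*s0 - 2*p0 + d0*(KW - 1) + 1
// so the default case above maps input_width 197 with a width-16 kernel to 212.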
// GGML_OP_CONV_TRANSPOSE_2D
struct test_conv_transpose_2d : public test_case {
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    const int stride;

    std::string vars() override {
        return VARS_TO_STR3(ne_input, ne_kernel, stride);
    }

    test_conv_transpose_2d(std::array<int64_t, 4> ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1]
            std::array<int64_t, 4> ne_kernel = {3, 3, 3, 1},                 // [kernel_width, kernel_height, input_channels, 1]
            int stride = 1)
        : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data());
        ggml_set_name(input, "input");

        ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne_kernel.data());
        ggml_set_name(kernel, "kernel");

        ggml_tensor * out = ggml_conv_transpose_2d_p0(ctx, kernel, input, stride);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_IM2COL
struct test_im2col : public test_case {
    const ggml_type type_input;
    const ggml_type type_kernel;
    const ggml_type dst_type;
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;
    // dilation
    const int d0;
    const int d1;
    // mode
    const bool is_2D;

    std::string vars() override {
        return VARS_TO_STR12(type_input, type_kernel, dst_type, ne_input, ne_kernel, s0, s1, p0, p1, d0, d1, is_2D);
    }

    test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16, ggml_type dst_type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1},  // [input_width, input_height, input_channels, 1]
            std::array<int64_t, 4> ne_kernel = {3, 3, 3, 1},   // [kernel_width, kernel_height, input_channels, 1]
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1,
            int d0 = 1, int d1 = 1,
            bool is_2D = true)
        : type_input(type_input), type_kernel(type_kernel), dst_type(dst_type), ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), is_2D(is_2D) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_set_param(input);
        ggml_set_name(input, "input");

        ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data());
        ggml_set_name(kernel, "kernel");

        ggml_tensor * out = ggml_im2col(ctx, kernel, input, s0, s1, p0, p1, d0, d1, is_2D, dst_type);
        ggml_set_name(out, "out");

        return out;
    }
};
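// For reference: im2col unfolds every (possibly strided/dilated) input patch into one
// column, so the subsequent convolution reduces to a single matrix multiplication. The
// number of patches per spatial dim follows the usual convolution arithmetic:
//   OW = (IW + 2*p0 - d0*(KW - 1) - 1) / s0 + 1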
// GGML_OP_CONV_2D_DW
struct test_conv_2d_dw : public test_case {
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    const int stride;
    const int padding;
    const int dilation;
    const bool cwhn;

    std::string vars() override {
        return VARS_TO_STR6(ne_input, ne_kernel, stride, padding, dilation, cwhn);
    }

    test_conv_2d_dw(std::array<int64_t, 4> ne_input = {64, 64, 16, 1},
            std::array<int64_t, 4> ne_kernel = {3, 3, 1, 16},
            int stride = 1, int padding = 0, int dilation = 1, bool cwhn = false)
        : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride), padding(padding), dilation(dilation), cwhn(cwhn) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data());
        ggml_set_name(input, "input");

        ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data());
        ggml_set_name(kernel, "kernel");

        if (cwhn) {
            // change memory layout to channel-most-contiguous (CWHN),
            // then permute it back so NE matches the original input
            input = ggml_cont(ctx, ggml_permute(ctx, input, 1, 2, 0, 3));
            input = ggml_permute(ctx, input, 2, 0, 1, 3);
            kernel = ggml_cont(ctx, ggml_permute(ctx, kernel, 2, 3, 1, 0));
            kernel = ggml_permute(ctx, kernel, 3, 2, 0, 1);
        }

        ggml_tensor * out = ggml_conv_2d_dw_direct(
                ctx, kernel, input,
                stride, stride, padding, padding, dilation, dilation);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_CONCAT
struct test_concat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int64_t ne_b_d;
    const int dim;
    const int v; // view (1 << 0: non-cont a, 1 << 1: non-cont b)

    std::string vars() override {
        return VARS_TO_STR5(type, ne_a, ne_b_d, dim, v);
    }

    test_concat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 5, 5, 5},
            int64_t ne_b_d = 5,
            int dim = 2, int v = 0)
        : type(type), ne_a(ne_a), ne_b_d(ne_b_d), dim(dim), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        auto ne_b = ne_a;
        ne_b[dim] = ne_b_d;

        ggml_tensor * a;
        if (v & 1) {
            auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3;
            a = ggml_new_tensor(ctx, type, 4, ne.data());
            ggml_set_name(a, "a");

            a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0);
            ggml_set_name(a, "view_of_a");
        } else {
            a = ggml_new_tensor(ctx, type, 4, ne_a.data());
            ggml_set_name(a, "a");
        }

        ggml_tensor * b;
        if (v & 2) {
            auto ne = ne_b; ne[0] *= 3; ne[1] *= 2; ne[2] *= 4;
            b = ggml_new_tensor(ctx, type, 4, ne.data());
            ggml_set_name(b, "b");

            b = ggml_view_4d(ctx, b, ne_b[0], ne_b[1], ne_b[2], ne_b[3], b->nb[1], b->nb[2], b->nb[3], 0);
            ggml_set_name(b, "view_of_b");
        } else {
            b = ggml_new_tensor(ctx, type, 4, ne_b.data());
            ggml_set_name(b, "b");
        }

        ggml_tensor * out = ggml_concat(ctx, a, b, dim);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_ARGSORT
struct test_argsort : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    ggml_sort_order order;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, order);
    }

    test_argsort(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {16, 10, 10, 10},
            ggml_sort_order order = GGML_SORT_ORDER_ASC)
        : type(type), ne(ne), order(order) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_argsort(ctx, a, order);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // indices
                std::vector<int> data(ggml_nelements(t));
                for (int i = 0; i < ggml_nelements(t); i++) {
                    data[i] = rand();
                }
                std::shuffle(data.begin(), data.end(), rng);
                ggml_backend_tensor_set(t, data.data(), 0, ne[0]*ne[1]*ne[2]*ne[3] * sizeof(int));
            } else if (t->type == GGML_TYPE_F32) {
                // initialize with unique values to avoid ties
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<float> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
                }
            } else {
                GGML_ABORT("fatal error");
            }
        }
    }
};
// GGML_OP_SUM
struct test_sum : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sum(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_sum(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    float grad_eps() override {
        return 0.1f * sqrtf(ne[0]*ne[1]*ne[2]*ne[3]);
    }
};
// GGML_OP_SUM_ROWS
struct test_sum_rows : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sum_rows(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_sum_rows(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_MEAN
struct test_mean : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_mean(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_mean(ctx, a);
        ggml_set_name(out, "out");

        return out;
    }

    float grad_eps() override {
        return 0.1f * ne[0]*ne[1]*ne[2]*ne[3];
    }
};
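// Note on grad_eps above: ggml_mean divides by the element count N, so each input
// gradient is 1/N; scaling the finite-difference step with N (vs. sqrt(N) for
// test_sum, whose per-element gradient is 1) presumably keeps the measured
// differences above float rounding error.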
// GGML_OP_UPSCALE
struct test_upscale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t scale_factor;
    const bool transpose;
    const ggml_scale_mode mode;

    std::string vars() override {
        return VARS_TO_STR5(type, ne, scale_factor, mode, transpose);
    }

    test_upscale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {512, 512, 3, 1},
            int32_t scale_factor = 2, ggml_scale_mode mode = GGML_SCALE_MODE_NEAREST, bool transpose = false)
        : type(type), ne(ne), scale_factor(scale_factor), transpose(transpose), mode(mode) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        if (transpose) {
            a = ggml_transpose(ctx, a);
            ggml_set_name(a, "a_transposed");
        }

        ggml_tensor * out = ggml_upscale(ctx, a, scale_factor, mode);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_UPSCALE (via ggml_interpolate)
struct test_interpolate : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int64_t, 4> ne_tgt;
    const uint32_t mode = GGML_SCALE_MODE_NEAREST;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, ne_tgt, mode);
    }

    test_interpolate(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {2, 5, 7, 11},
            std::array<int64_t, 4> ne_tgt = {5, 7, 11, 13},
            uint32_t mode = GGML_SCALE_MODE_NEAREST)
        : type(type), ne(ne), ne_tgt(ne_tgt), mode(mode) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_interpolate(ctx, a, ne_tgt[0], ne_tgt[1], ne_tgt[2], ne_tgt[3], mode);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_GROUP_NORM
struct test_group_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t num_groups;
    const float eps;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, num_groups, eps);
    }

    test_group_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 64, 320, 1},
            int32_t num_groups = 32,
            float eps = 1e-6f)
        : type(type), ne(ne), num_groups(num_groups), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_group_norm(ctx, a, num_groups, eps);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_L2_NORM
struct test_l2_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const float eps;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_l2_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 64, 320, 1},
            float eps = 1e-12f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_l2_norm(ctx, a, eps);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_ACC
struct test_acc : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const std::array<int64_t, 4> ne_b;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, ne_b);
    }

    test_acc(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {256, 17, 1, 1},
            std::array<int64_t, 4> ne_b = {256, 16, 1, 1})
        : type(type), ne_a(ne_a), ne_b(ne_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_set_param(a);
        ggml_set_name(a, "a");

        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        ggml_set_param(b);
        ggml_set_name(b, "b");

        ggml_tensor * out = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], b->nb[1]);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_PAD
struct test_pad : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int pad_0;
    const int pad_1;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
    }

    test_pad(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {512, 512, 1, 1},
            int pad_0 = 1, int pad_1 = 1)
        : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_pad(ctx, a, pad_0, pad_1, 0, 0);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_PAD_REFLECT_1D
struct test_pad_reflect_1d : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int pad_0;
    const int pad_1;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
    }

    test_pad_reflect_1d(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {512, 34, 2, 1},
            int pad_0 = 10, int pad_1 = 9)
        : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // note: only the first two entries of ne_a are used (a 2-D tensor is created)
        ggml_tensor * a = ggml_new_tensor(ctx, type, 2, ne_a.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_pad_reflect_1d(ctx, a, pad_0, pad_1);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_ARANGE
struct test_arange : public test_case {
    const ggml_type type;
    const float start;
    const float stop;
    const float step;

    std::string vars() override {
        return VARS_TO_STR4(type, start, stop, step);
    }

    test_arange(ggml_type type = GGML_TYPE_F32,
            float start = 0.f, float stop = 10.f, float step = 1.f)
        : type(type), start(start), stop(stop), step(step) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * out = ggml_arange(ctx, start, stop, step);
        ggml_set_name(out, "out");

        return out;
    }
};
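// For reference, ggml_arange produces ceil((stop - start) / step) elements, so the
// defaults above (0, 10, 1) yield a 10-element 1-D tensor.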
// GGML_OP_TIMESTEP_EMBEDDING
struct test_timestep_embedding : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int dim;
    const int max_period;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, dim, max_period);
    }

    test_timestep_embedding(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {2, 1, 1, 1},
            int dim = 320, int max_period = 10000)
        : type(type), ne_a(ne_a), dim(dim), max_period(max_period) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_timestep_embedding(ctx, a, dim, max_period);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_LEAKY_RELU
struct test_leaky_relu : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const float negative_slope;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, negative_slope);
    }

    test_leaky_relu(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 5, 4, 3},
            float negative_slope = 0.1f)
        : type(type), ne_a(ne_a), negative_slope(negative_slope) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_leaky_relu(ctx, a, negative_slope, true);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_FLASH_ATTN_EXT
struct test_flash_attn_ext : public test_case {
    const int64_t hsk;                  // K head size
    const int64_t hsv;                  // V head size
    const int64_t nh;                   // num heads
    const std::array<int64_t, 2> nr23;  // repeat in dim 2 and 3, tests for grouped-query attention
    const int64_t kv;                   // kv size
    const int64_t nb;                   // batch size
    const bool mask;                    // use mask
    const float max_bias;               // ALiBi
    const float logit_softcap;          // Gemma 2
    const ggml_prec prec;
    const ggml_type type_KV;
    std::array<int32_t, 4> permute;

    std::string vars() override {
        return VARS_TO_STR12(hsk, hsv, nh, nr23, kv, nb, mask, max_bias, logit_softcap, prec, type_KV, permute);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    uint64_t op_flops(ggml_tensor * t) override {
        GGML_UNUSED(t);
        // Just counting matmul costs:
        // Q*K^T is nb x hsk x kv, P*V is nb x kv x hsv, per head
        return (2 * nh*nr23[0] * nb * (hsk + hsv) * kv)*nr23[1];
    }

    test_flash_attn_ext(int64_t hsk = 128, int64_t hsv = 128, int64_t nh = 32, std::array<int64_t, 2> nr23 = {1, 1}, int64_t kv = 96, int64_t nb = 8,
            bool mask = true, float max_bias = 0.0f, float logit_softcap = 0.0f, ggml_prec prec = GGML_PREC_F32,
            ggml_type type_KV = GGML_TYPE_F16, std::array<int32_t, 4> permute = {0, 1, 2, 3})
        : hsk(hsk), hsv(hsv), nh(nh), nr23(nr23), kv(kv), nb(nb), mask(mask), max_bias(max_bias), logit_softcap(logit_softcap), prec(prec), type_KV(type_KV), permute(permute) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        const int64_t hsk_padded = GGML_PAD(hsk, ggml_blck_size(type_KV));
        const int64_t hsv_padded = GGML_PAD(hsv, ggml_blck_size(type_KV));

        auto const & create_permuted = [&](ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) -> ggml_tensor * {
            int64_t ne[4] = {ne0, ne1, ne2, ne3};
            int64_t ne_perm[4];
            for (int i = 0; i < 4; ++i) {
                ne_perm[permute[i]] = ne[i];
            }
            ggml_tensor * t = ggml_new_tensor_4d(ctx, type, ne_perm[0], ne_perm[1], ne_perm[2], ne_perm[3]);
            if (permute != std::array<int32_t, 4>{0, 1, 2, 3}) {
                t = ggml_permute(ctx, t, permute[0], permute[1], permute[2], permute[3]);
            }
            return t;
        };

        ggml_tensor * q = create_permuted(GGML_TYPE_F32, hsk_padded, nb, nh*nr23[0], nr23[1]);
        ggml_set_name(q, "q");

        ggml_tensor * k = create_permuted(type_KV, hsk_padded, kv, nh, nr23[1]);
        ggml_set_name(k, "k");

        ggml_tensor * v = create_permuted(type_KV, hsv_padded, kv, nh, nr23[1]);
        ggml_set_name(v, "v");

        ggml_tensor * m = nullptr;
        if (mask) {
            m = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), nr23[1], 1);
            ggml_set_name(m, "m");
        }

        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hsk), max_bias, logit_softcap);
        ggml_flash_attn_ext_set_prec(out, prec);
        ggml_set_name(out, "out");

        return out;
    }

    bool grad_precise() override {
        return true;
    }
};
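// Sanity check of op_flops above, counting 2 FLOPs per multiply-accumulate: per head,
// Q*K^T multiplies an hsk x nb matrix against an hsk x kv one (2*hsk*nb*kv FLOPs) and
// P*V costs 2*kv*hsv*nb, giving 2*nb*kv*(hsk + hsv) per head before scaling by the
// nh*nr23[0]*nr23[1] head count.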
// GGML_OP_CROSS_ENTROPY_LOSS
struct test_cross_entropy_loss : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cross_entropy_loss(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * logits = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(logits);
        ggml_set_name(logits, "logits");

        ggml_tensor * labels = ggml_new_tensor(ctx, type, 4, ne.data());
        // The labels are assumed to be constant -> no gradients.
        ggml_set_name(labels, "labels");

        // Ensure labels add up to 1:
        labels = ggml_soft_max(ctx, labels);
        ggml_set_name(labels, "labels_normalized");

        ggml_tensor * out = ggml_cross_entropy_loss(ctx, logits, labels);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        // For larger abs. diffs between logits softmax is more linear, therefore more precise num. gradients.
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, -100.0f, 100.0f);
        }
    }

    float grad_eps() override {
        return 1.0f;
    }

    bool grad_precise() override {
        return true;
    }
};
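// For reference, the per-row loss computed here is the standard cross-entropy
//   loss = -sum_i labels_i * log(softmax(logits)_i)
// reduced to a scalar; normalizing the labels with ggml_soft_max makes them a valid
// probability distribution first.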
// GGML_OP_CROSS_ENTROPY_LOSS_BACK
struct test_cross_entropy_loss_back : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cross_entropy_loss_back(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * grad = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
        ggml_set_name(grad, "grad");

        ggml_tensor * logits = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(logits, "logits");

        ggml_tensor * labels = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(labels, "labels");

        // Ensure labels add up to 1:
        labels = ggml_soft_max(ctx, labels);
        ggml_set_name(labels, "labels_normalized");

        ggml_tensor * out = ggml_cross_entropy_loss_back(ctx, grad, logits, labels);
        ggml_set_name(out, "out");

        return out;
    }
};
// GGML_OP_OPT_STEP_ADAMW
struct test_opt_step_adamw : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_opt_step_adamw(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
        ggml_set_param(a); // Despite tensor a having gradients the output tensor will not.
        ggml_set_name(a, "a");

        ggml_tensor * grad = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
        ggml_set_name(grad, "grad");

        ggml_tensor * grad_m = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
        ggml_set_name(grad_m, "grad_m");

        ggml_tensor * grad_v = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]);
        ggml_set_name(grad_v, "grad_v");

        ggml_tensor * adamw_params = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 7);
        ggml_set_name(adamw_params, "adamw_params");

        ggml_tensor * out = ggml_opt_step_adamw(ctx, a, grad, grad_m, grad_v, adamw_params);
        ggml_set_name(out, "out");

        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t, 0.0f, 1.0f); // grad_v and adamw_params need non-negative values.
        }
    }

    bool grad_precise() override {
        return true;
    }
};
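// For reference, a sketch of the standard AdamW update this op performs (the exact
// layout of the 7 adamw_params values is defined by ggml_opt_step_adamw, not here):
//   m = b1*m + (1 - b1)*g;   v = b2*v + (1 - b2)*g*g;
//   x = x - lr * (m_hat / (sqrt(v_hat) + eps) + wd*x);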
enum llm_norm_type {
    LLM_NORM,
    LLM_NORM_RMS,
};

struct llama_hparams {
    uint32_t n_vocab;
    uint32_t n_embd;
    uint32_t n_head;
    uint32_t n_head_kv;
    static constexpr uint32_t n_layer = 1;
    uint32_t n_rot;
    uint32_t n_embd_head; // dimension of values (d_v)
    uint32_t n_ff;

    float f_norm_eps;
    float f_norm_rms_eps;

    // cparams
    static constexpr uint32_t n_ctx = 512; // user-specified context size
    static constexpr uint32_t n_ctx_orig = n_ctx;

    // batch
    int32_t n_tokens;

    // llm_build_context
    static constexpr int32_t n_kv = 32;   // size of KV cache to consider (n_kv <= n_ctx)
    static constexpr int32_t kv_head = 1; // index of where we store new KV data in the cache

    uint32_t n_embd_gqa() const { // dimension of key embeddings across all k-v heads
        return n_embd_head * n_head_kv;
    }
};
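// Example: with the Llama hparams below (n_embd_head = 100, n_head_kv = 32) this gives
// n_embd_gqa() = 3200, i.e. K/V projections as wide as the full embedding, while the
// Falcon hparams (n_embd_head = 64, n_head_kv = 1) give 64, a multi-query cache.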
// LLM base class
struct test_llm : public test_case {
    llama_hparams hp;

protected:
    test_llm(llama_hparams hp)
        : hp(std::move(hp)) {
    }

public:
    struct ggml_tensor * llm_build_norm(
            struct ggml_context * ctx,
            struct ggml_tensor * cur,
            struct ggml_tensor * mw,
            struct ggml_tensor * mb,
            llm_norm_type type) {
        switch (type) {
            case LLM_NORM:     cur = ggml_norm    (ctx, cur, hp.f_norm_eps);     break;
            case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hp.f_norm_rms_eps); break;
        }
        cur = ggml_mul(ctx, cur, mw);
        if (mb) {
            cur = ggml_add(ctx, cur, mb);
        }
        return cur;
    }

    void llm_build_kv_store(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * k_cur,
            struct ggml_tensor * v_cur) {
        // compute the transposed [n_tokens, n_embd] V matrix
        struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, hp.n_embd_gqa(), hp.n_tokens));

        struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, hp.n_tokens*hp.n_embd_gqa(),
                (ggml_row_size(k_l->type, hp.n_embd_gqa()))*hp.kv_head);

        struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, v_l, hp.n_tokens, hp.n_embd_gqa(),
                (hp.n_ctx)*ggml_element_size(v_l),
                (hp.kv_head)*ggml_element_size(v_l));

        // important: storing RoPE-ed version of K in the KV cache!
        ggml_cpy(ctx, k_cur, k_cache_view);
        ggml_cpy(ctx, v_cur_t, v_cache_view);
    }

    struct ggml_tensor * llm_build_kqv(
            struct ggml_context * ctx,
            struct ggml_tensor * k_l,
            struct ggml_tensor * v_l,
            struct ggml_tensor * q_cur,
            struct ggml_tensor * kq_mask,
            float kq_scale) {
        struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);

        struct ggml_tensor * k =
            ggml_view_3d(ctx, k_l,
                    hp.n_embd_head, hp.n_kv, hp.n_head_kv,
                    ggml_row_size(k_l->type, hp.n_embd_gqa()),
                    ggml_row_size(k_l->type, hp.n_embd_head),
                    0);

        struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, 0.0f);

        // split cached v into n_head heads
        struct ggml_tensor * v =
            ggml_view_3d(ctx, v_l,
                    hp.n_kv, hp.n_embd_head, hp.n_head_kv,
                    ggml_element_size(v_l)*hp.n_ctx,
                    ggml_element_size(v_l)*hp.n_ctx*hp.n_embd_head,
                    0);

        struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
        struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);

        struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, hp.n_embd_head*hp.n_head, hp.n_tokens);

        struct ggml_tensor * wo = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
        cur = ggml_mul_mat(ctx, wo, cur);

        return cur;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(hp.n_tokens);
                for (int i = 0; i < hp.n_tokens; i++) {
                    data[i] = rand() % hp.n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, hp.n_tokens * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};
// Llama
struct test_llama : public test_llm {
    static constexpr float freq_base   = 10000.0f;
    static constexpr float freq_scale  = 1.0f;
    static constexpr float ext_factor  = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast   = 32.0f;
    static constexpr float beta_slow   = 1.0f;
    bool fused;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "LLAMA";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    bool run_whole_graph() override { return fused; }

    test_llama(int n_tokens = 1, bool fused = false)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 32,
            /*n_head_kv      =*/ 32,
            /*n_rot          =*/ 100,
            /*n_embd_head    =*/ 100,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 0.f,
            /*f_norm_rms_eps =*/ 1e-5f,
            /*n_tokens       =*/ n_tokens,
        })
        , fused(fused)
    {
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);

        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            ggml_tensor * attn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, inpL, attn_norm, nullptr, LLM_NORM_RMS);

            // self-attention
            {
                ggml_tensor * wq = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd);
                ggml_tensor * wk = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());
                ggml_tensor * wv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd_gqa());

                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx, wq, cur);
                struct ggml_tensor * Kcur = ggml_mul_mat(ctx, wk, cur);
                struct ggml_tensor * Vcur = ggml_mul_mat(ctx, wv, cur);

                Qcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens), inp_pos, nullptr,
                    hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx, cur, inpSA);

            // feed-forward network
            ggml_tensor * ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            cur = llm_build_norm(ctx, ffn_inp, ffn_norm, nullptr, LLM_NORM_RMS);

            ggml_tensor * ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
            ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
            ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);

            struct ggml_tensor * tmp = ggml_mul_mat(ctx, ffn_up, cur);
            cur = ggml_mul_mat(ctx, ffn_gate, cur);
            cur = ggml_silu(ctx, cur);
            cur = ggml_mul(ctx, cur, tmp);
            cur = ggml_mul_mat(ctx, ffn_down, cur);
            cur = ggml_add(ctx, cur, ffn_inp);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, nullptr, LLM_NORM_RMS);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);

        return cur;
    }
};
// Falcon
struct test_falcon : public test_llm {
    static constexpr float freq_base   = 10000.0f;
    static constexpr float freq_scale  = 1.0f;
    static constexpr float ext_factor  = 0.0f;
    static constexpr float attn_factor = 1.0f;
    static constexpr float beta_fast   = 32.0f;
    static constexpr float beta_slow   = 1.0f;

    std::string op_desc(ggml_tensor * t) override {
        GGML_UNUSED(t);
        return "FALCON";
    }

    std::string vars() override {
        auto n_tokens = hp.n_tokens;
        return VARS_TO_STR1(n_tokens);
    }

    double max_nmse_err() override {
        return 2e-3;
    }

    test_falcon(int n_tokens = 1)
        : test_llm({
            /*n_vocab        =*/ 32000,
            /*n_embd         =*/ 3200,
            /*n_head         =*/ 50,
            /*n_head_kv      =*/ 1,
            /*n_rot          =*/ 64,
            /*n_embd_head    =*/ 64,
            /*n_ff           =*/ 8640,
            /*f_norm_eps     =*/ 1e-5f,
            /*f_norm_rms_eps =*/ 0.f,
            /*n_tokens       =*/ n_tokens,
        }) {
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hp.n_embd, hp.n_tokens);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, hp.n_tokens);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, hp.n_kv, hp.n_tokens, 1);

        ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);
        ggml_tensor * v_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1638400);

        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            // norm
            ggml_tensor * attn_norm_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
            ggml_tensor * attn_norm = llm_build_norm(ctx, inpL, attn_norm_w, attn_norm_b, LLM_NORM);

            // self-attention
            {
                cur = attn_norm;

                ggml_tensor * wqkv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd + 2*hp.n_embd_gqa());

                cur = ggml_mul_mat(ctx, wqkv, cur);

                struct ggml_tensor * Qcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd,       hp.n_tokens, cur->nb[1], 0*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd + hp.n_embd_gqa())));

                Qcur = ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head,    hp.n_tokens);
                Kcur = ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens);

                // using mode = 2 (GGML_ROPE_TYPE_NEOX) for NeoX-style RoPE
                Qcur = ggml_rope_ext(
                    ctx, Qcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                Kcur = ggml_rope_ext(
                    ctx, Kcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );

                llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur);

                cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head)));
            }

            struct ggml_tensor * ffn_inp = cur;

            // feed forward
            {
                ggml_tensor * ffn_up   = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff);
                ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd);
                cur = attn_norm;
                cur = ggml_mul_mat(ctx, ffn_up, cur);
                cur = ggml_gelu(ctx, cur);
                cur = ggml_mul_mat(ctx, ffn_down, cur);
            }

            cur = ggml_add(ctx, cur, ffn_inp);
            cur = ggml_add(ctx, cur, inpL);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        ggml_tensor * output_norm   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        ggml_tensor * output_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd);
        cur = llm_build_norm(ctx, cur, output_norm, output_norm_b, LLM_NORM);

        // lm_head
        ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, hp.n_embd, hp.n_vocab);
        cur = ggml_mul_mat(ctx, output, cur);

        return cur;
    }
};
// ###########################################
// ## Section 3: GGML Op Test Instantiation ##
// ###########################################

static const ggml_type all_types[] = {
    GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_BF16,
    GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
    GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
    GGML_TYPE_Q8_0,
    GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
    GGML_TYPE_Q4_K, GGML_TYPE_Q5_K,
    GGML_TYPE_Q6_K,
    // GGML_TYPE_TQ1_0, GGML_TYPE_TQ2_0, // TODO: implement for all backends
    GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
    GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
    GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
};

static const ggml_type base_types[] = {
    GGML_TYPE_F32, GGML_TYPE_F16,
    GGML_TYPE_Q8_0, // for I8MM tests
    GGML_TYPE_Q4_0,
    GGML_TYPE_Q4_1, // for I8MM tests
    GGML_TYPE_Q4_K,
    GGML_TYPE_IQ2_XXS
};

static const ggml_type other_types[] = {
    GGML_TYPE_Q4_1,
    GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
    GGML_TYPE_Q8_0,
    GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
    GGML_TYPE_Q5_K,
    GGML_TYPE_Q6_K,
    // GGML_TYPE_TQ1_0, GGML_TYPE_TQ2_0, // TODO: implement for all backends
    GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
    GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
    GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
    GGML_TYPE_BF16,
};
  3434. // Test cases for evaluation: should try to cover edge cases while using small input sizes to keep the runtime low
  3435. static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
  3436. std::vector<std::unique_ptr<test_case>> test_cases;
  3437. std::default_random_engine rng(0);
  3438. // unary ops
  3439. for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) {
  3440. for (int v : {0, 1}) {
  3441. for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) {
  3442. test_cases.emplace_back(new test_unary((ggml_unary_op) op, type, { 128, 2, 2, 2 }, v));
  3443. test_cases.emplace_back(new test_unary((ggml_unary_op) op, type, { 5, 7, 11, 13 }, v));
  3444. }
  3445. }
  3446. }
  3447. // glu ops
  3448. for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) {
  3449. for (int v : {0, 1}) {
  3450. for (int op = 0; op < GGML_GLU_OP_COUNT; op++) {
  3451. for (bool swapped : {false, true}) {
  3452. test_cases.emplace_back(new test_glu((ggml_glu_op) op, type, { 128, 2, 2, 2 }, v, swapped));
  3453. test_cases.emplace_back(new test_glu((ggml_glu_op) op, type, { 5, 7, 11, 13 }, v, swapped));
  3454. }
  3455. test_cases.emplace_back(new test_glu_split((ggml_glu_op) op, type, { 128, 2, 2, 2 }, v));
  3456. test_cases.emplace_back(new test_glu_split((ggml_glu_op) op, type, { 5, 7, 11, 13 }, v));
  3457. }
  3458. }
  3459. }
  3460. test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false));
  3461. for (ggml_type type : all_types) {
  3462. for (int b : {1, 7}) {
  3463. for (bool v : {false, true}) {
  3464. test_cases.emplace_back(new test_get_rows(type, 256, 5, 4, b, v));
  3465. }
  3466. }
  3467. }
  3468. for (int b : {1, 7}) {
  3469. for (bool v : {false, true}) {
  3470. test_cases.emplace_back(new test_get_rows(GGML_TYPE_I32, 256, 5, 4, b, v));
  3471. }
  3472. }
  3473. test_cases.emplace_back(new test_get_rows_back(GGML_TYPE_F32, 1, 8, 2, 1, false));
  3474. for (ggml_type type : all_types) {
  3475. for (bool v : {false, true}) {
  3476. test_cases.emplace_back(new test_get_rows_back(type, 256, 5, 4, 1, v));
  3477. }
  3478. }
  3479. for (bool v : {false, true}) {
  3480. test_cases.emplace_back(new test_get_rows_back(GGML_TYPE_I32, 256, 5, 4, 1, v));
  3481. }
  3482. test_cases.emplace_back(new test_set_rows(GGML_TYPE_F32, { 1, 8, 1, 3 }, { 1, 1 }, 2, false));
  3483. for (ggml_type type : all_types) {
  3484. for (int b : {1, 7}) {
  3485. for (bool v : {false, true}) {
  3486. test_cases.emplace_back(new test_set_rows(type, { 256, 5, b, 3 }, { 1, 1, }, 1, v));
  3487. test_cases.emplace_back(new test_set_rows(type, { 256, 11, 1, b }, { 2, 3, }, 7, v));
  3488. test_cases.emplace_back(new test_set_rows(type, { 3*ggml_blck_size(type), 3, b, 1 }, { 2, 3, }, 2, v));
  3489. if (ggml_blck_size(type) == 1) {
  3490. test_cases.emplace_back(new test_set_rows(type, { 31, 3, b, 1 }, { 2, 3, }, 2, v));
  3491. test_cases.emplace_back(new test_set_rows(type, { 33, 5, 1, b }, { 2, 3, }, 1, v));
  3492. }
  3493. }
  3494. }
  3495. }
  3496. for (ggml_type type_input : {GGML_TYPE_F32}) {
  3497. for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) {
  3498. for (int k0 : {1, 3}) {
  3499. for (int k1 : {1, 3}) {
  3500. for (int s0 : {1, 2}) {
  3501. for (int s1 : {1, 2}) {
  3502. for (int p0 : {0, 1}) {
  3503. for (int p1 : {0, 1}) {
  3504. test_cases.emplace_back(new test_pool2d(pool_type, type_input, {10, 10, 3, 1}, k0, k1, s0, s1, p0, p1));
  3505. }
  3506. }
  3507. }
  3508. }
  3509. }
  3510. }
  3511. }
  3512. }
  3513. // im2col 1D
  3514. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
  3515. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
  3516. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false));
  3517. for (int s0 : {1, 3}) {
  3518. for (int p0 : {0, 3}) {
  3519. for (int d0 : {1, 3}) {
  3520. test_cases.emplace_back(new test_im2col(
  3521. GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {20, 2, 2, 1}, {3, 2, 2, 1},
  3522. s0, 0, p0, 0, d0, 0, false));
  3523. }
  3524. }
  3525. }
  3526. // im2col 2D
  3527. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32));
  3528. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32));
  3529. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16));
  3530. for (int s0 : {1, 3}) {
  3531. for (int s1 : {1, 3}) {
  3532. for (int p0 : {0, 3}) {
  3533. for (int p1 : {0, 3}) {
  3534. for (int d0 : {1, 3}) {
  3535. for (int d1 : {1, 3}) {
  3536. test_cases.emplace_back(new test_im2col(
  3537. GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {20, 20, 2, 2}, {3, 3, 2, 2},
  3538. s0, s1, p0, p1, d0, d1, true));
  3539. }
  3540. }
  3541. }
  3542. }
  3543. }
  3544. }
  3545. // extra tests for im2col 2D
  3546. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 32}, {3, 3, 1, 32}, 1, 1, 1, 1, 1, 1, true));
  3547. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 32}, {3, 3, 2, 32}, 1, 1, 1, 1, 1, 1, true));
  3548. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 1024}, {3, 3, 1, 1024}, 1, 1, 1, 1, 1, 1, true));
  3549. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 1024}, {3, 3, 2, 1024}, 1, 1, 1, 1, 1, 1, true));
  3550. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 2048}, {3, 3, 1, 2048}, 1, 1, 1, 1, 1, 1, true));
  3551. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 2048}, {3, 3, 2, 2048}, 1, 1, 1, 1, 1, 1, true));
  3552. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 2560}, {3, 3, 1, 2560}, 1, 1, 1, 1, 1, 1, true));
  3553. test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 2560}, {3, 3, 2, 2560}, 1, 1, 1, 1, 1, 1, true));
  3554. // sycl backend will limit task global_range < MAX_INT
  3555. // test cases for 2D im2col with large input W and H (occurs in stable-diffusion)
  3556. // however these cases need to alloc more memory which may fail in some devices (Intel Arc770, etc.)
  3557. // these cases are verified (pass) in Intel(R) Data Center GPU Max 1100 (sycl backend) and NV A30 (cuda backend)
  3558. // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true));
  3559. // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true));
  3560. test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, false));
  3561. test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, true));
  3562. test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, false));
  3563. test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, true));
    for (uint32_t Cout : {1, 9}) {
        for (uint32_t Cin : {1, 7}) {
            for (uint32_t K : {1, 3, 1337}) {
                for (uint32_t L : {1, 2, 13}) {
                    for (uint32_t s0 : {1, 2, 3}) {
                        test_cases.emplace_back(new test_conv_transpose_1d({L, Cin, 1, 1}, {K, Cout, Cin, 1}, s0, 0, 1));
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_conv_transpose_1d());
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 3, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 2, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 1, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 2, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 1, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1));
    test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1));

    test_cases.emplace_back(new test_conv_transpose_2d({3, 2, 3, 1}, {2, 2, 1, 3}, 1));
    test_cases.emplace_back(new test_conv_transpose_2d({10, 10, 9, 1}, {3, 3, 1, 9}, 2));
    test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, {4,  500, 1, 1}));
    test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, {4, 5000, 1, 1}));

    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {  32,  1, 1, 1}));
    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 100, 10, 1, 1}));
    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1}));
    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 12, 1, 1}));
    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {2000, 10, 1, 1}));
    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {5438,  3, 1, 1}));
    for (int ne3 : {1, 3}) { // CUDA backward pass only supports ne3 == 1
        test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 1, 1}));
        test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {2, 1, 1, 1}));
        test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 2, 1, 1}));
        test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 2, 1}));
        test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 1, 2}));
        test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, {10, 5, 4, ne3}, {2, 1, 1, 1}));
        test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 5, 4, ne3}, {1, 1, 1, 2}));
    }
    for (bool view : {false, true}) {
        test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 1, 1}, view));
        test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {2, 1, 1, 1}, view));
        test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 2, 1, 1}, view));
        test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 2, 1}, view));
        test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 1, 2}, view));
    }
    test_cases.emplace_back(new test_dup(GGML_TYPE_F32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I32));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {0, 2, 1, 3})); // dup by rows
    test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {1, 0, 2, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {1, 0, 2, 3})); // dup dst not-contiguous
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10,  8, 3, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10,  8, 3, 1}, {1, 2, 0, 3}));
    for (int dim = 1; dim < GGML_MAX_DIMS; ++dim) {
        test_cases.emplace_back(new test_set(GGML_TYPE_F32, GGML_TYPE_F32, {6, 5, 4, 3}, dim));
    }
    for (int dim = 1; dim < GGML_MAX_DIMS; ++dim) {
        test_cases.emplace_back(new test_set(GGML_TYPE_I32, GGML_TYPE_I32, {6, 5, 4, 3}, dim));
    }
    // same-type copy
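    // (the row length k*nk keeps ne0 a multiple of the type's block size, as quantized types require)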
    for (ggml_type type : all_types) {
        const auto nk = ggml_blck_size(type);
        for (int k = 1; k < 4; ++k) {
            test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4}));
            test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4}, {0, 2, 1, 3}));
            test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4}, {0, 3, 1, 2}, {0, 2, 1, 3}));
        }
    }
    for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_F32}) {
        for (ggml_type type_dst : all_types) {
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows
        }
    }
    for (ggml_type type_src : all_types) {
        for (ggml_type type_dst : {GGML_TYPE_F32}) {
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows
        }
    }
    for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
        for (ggml_type type_dst : {GGML_TYPE_F16, GGML_TYPE_F32}) {
            test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
        }
    }
    test_cases.emplace_back(new test_cont());
    test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 3, 5}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 3, 5, 7}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 1, 3, 5}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 3, 5, 7}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 1, 3, 5}));
    test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 3, 5, 7}));
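    // binary broadcast ops (add/sub/mul/div): nr gives the per-dimension factors by which the
    // first operand's shape exceeds ne, so the second operand is broadcast across it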
    auto add_test_bin_bcast = [&](ggml_type type, std::array<int64_t, 4> ne, std::array<int, 4> nr) {
        for (auto op : {ggml_add, ggml_sub, ggml_mul, ggml_div}) {
            test_cases.emplace_back(new test_bin_bcast(op, type, ne, nr));
        }
    };
    for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) {
        add_test_bin_bcast(type, {1, 1, 8, 1}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {1, 1, 1, 1}, {32, 1, 1, 1});
        add_test_bin_bcast(type, {1, 1, 320, 320}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {10, 5, 1, 1}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {10, 5, 4, 1}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {2, 1, 1, 1});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 2, 1, 1});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 2, 1});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 1, 2});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 2, 2});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 2, 2, 2});
        add_test_bin_bcast(type, {10, 5, 4, 3}, {2, 2, 2, 2});

        // stable diffusion
        add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 16, 16, 1});
        add_test_bin_bcast(type, {1280, 16, 16, 1}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 256, 1, 1});
        add_test_bin_bcast(type, {1, 1, 1280, 1}, {16, 16, 1, 1});
        add_test_bin_bcast(type, {16, 16, 1280, 1}, {1, 1, 1, 1});
        add_test_bin_bcast(type, {1, 1, 1920, 1}, {16, 16, 1, 1});
        add_test_bin_bcast(type, {1, 1, 2560, 1}, {16, 16, 1, 1});
        add_test_bin_bcast(type, {1, 1, 1280, 1}, {32, 32, 1, 1});
        add_test_bin_bcast(type, {1, 1, 1920, 1}, {32, 32, 1, 1});
        add_test_bin_bcast(type, {1, 1, 640, 1}, {32, 32, 1, 1});
        add_test_bin_bcast(type, {5120, 1, 1, 1}, {1, 256, 1, 1});
        add_test_bin_bcast(type, {640, 1, 1, 1}, {1, 1, 1, 1});
        //add_test_bin_bcast(type, {3, 3, 2560, 1280}, {1, 1, 1, 1});
        //add_test_bin_bcast(type, {3, 3, 2560, 1280}, {2, 1, 1, 1});
    }
    test_cases.emplace_back(new test_add1());
    test_cases.emplace_back(new test_scale());
    test_cases.emplace_back(new test_silu_back());

    for (float eps : {0.0f, 1e-6f, 1e-4f, 1e-1f}) {
        for (bool v : {false, true}) {
            test_cases.emplace_back(new test_norm    (GGML_TYPE_F32, {64, 5, 4, 3}, v, eps));
            test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 5, 4, 3}, v, eps));
        }
        test_cases.emplace_back(new test_rms_norm_back(GGML_TYPE_F32, {64, 5, 4, 3}, eps));
        test_cases.emplace_back(new test_l2_norm     (GGML_TYPE_F32, {64, 5, 4, 3}, eps));
    }
    for (float eps : {0.0f, 1e-6f, 1e-4f, 1e-1f}) {
        test_cases.emplace_back(new test_rms_norm_mul(GGML_TYPE_F32, {64, 5, 4, 3}, eps));
    }
    test_cases.emplace_back(new test_l2_norm(GGML_TYPE_F32, {64, 5, 4, 3}, 1e-12f));

    test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 1, 1}, {4, 1536, 1, 1}));
    test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {8, 1536, 1, 1}, {4, 1536, 1, 1}));
    test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 4, 1}, {4, 1536, 1, 1}));

    test_cases.emplace_back(new test_ssm_scan(GGML_TYPE_F32,  16,  1, 1024, 1, 32, 4)); // Mamba-1
    test_cases.emplace_back(new test_ssm_scan(GGML_TYPE_F32, 128, 64,   16, 2, 32, 4)); // Mamba-2

    test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64,   1, 1));
    test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64,  32, 1));
    test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64,  32, 4));
    test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 128, 4));

    test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64,   1, 1));
    test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64,  32, 1));
    test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64,  32, 4));
    test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64, 128, 4));

    test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64,   1, 1));
    test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64,  32, 1));
    test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64,  32, 4));
    test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 128, 4));
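    // mul_mat: small n values (1..9) mainly exercise the matrix-vector kernel paths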
    for (ggml_type type_a : all_types) {
        for (int i = 1; i < 10; ++i) {
            test_cases.emplace_back(new test_mul_mat(type_a, GGML_TYPE_F32, 16, i, 256, {1, 1}, {1, 1}));
        }
    }
#if 1
    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
            std::vector<int> ks = { 256 };
            if (ggml_blck_size(type_a) == 1) {
                ks.push_back(4);
            }
            for (auto k : ks) {
                // test cases without permutation
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {1, 1}, {1, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {1, 1}, {2, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {1, 1}, {1, 2}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {3, 1}, {1, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {3, 1}, {2, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {3, 2}, {1, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {3, 2}, {2, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {3, 2}, {1, 2}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {3, 2}, {2, 2}));

                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {1, 1}, {1, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {1, 1}, {2, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {1, 1}, {1, 2}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 1}, {1, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 1}, {2, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {1, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {2, 1}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {1, 2}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {2, 2}));

                // test cases with permutation
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {2, 3}, {1, 1}, {0, 2, 1, 3}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {2, 3}, {1, 1}, {0, 1, 3, 2}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, k, {2, 3}, {1, 1}, {0, 3, 2, 1}));

                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  8, k, {2, 3}, {1, 1}, {0, 2, 1, 3}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  8, k, {2, 3}, {1, 1}, {0, 1, 3, 2}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  8, k, {2, 3}, {1, 1}, {0, 3, 2, 1}));

                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {2, 3}, {1, 1}, {0, 2, 1, 3}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {2, 3}, {1, 1}, {0, 1, 3, 2}));
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {2, 3}, {1, 1}, {0, 3, 2, 1}));
            }

            // test cases with large ne00/ne10 to cover stream-k fixup
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 1024, {3, 2}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  8, 1024, {3, 2}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 1024, {3, 2}, {1, 1}));
        }
    }
    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32}) {
            if (ggml_blck_size(type_a) != 256) {
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, ggml_blck_size(type_a), {1, 1}, {1, 1}));
            }
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1}));
        }
    }
#else
    // m = a rows
    // n = b rows
    // k = cols
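    // (in ggml_mul_mat terms: a has shape [k, m], b has shape [k, n], and the result is [m, n])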
    std::uniform_int_distribution<> dist_m(1, 128);
    std::uniform_int_distribution<> dist_n(16, 128);
    std::uniform_int_distribution<> dist_k(1, 16);
    for (int i = 0; i < 1000; i++) {
        for (ggml_type type_a : all_types) {
            for (ggml_type type_b : {GGML_TYPE_F32}) {
                int m = dist_m(rng);
                int n = dist_n(rng);
                int k = dist_k(rng) * ggml_blck_size(type_a);
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, m, n, k, {1, 1}, {1, 1}));
            }
        }
    }
#endif
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,   64,  2, 128, {8, 1}, {1, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,   83,  2, 128, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,   64,  2,  64, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,   83,  2,  64, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,   64, 45, 128, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  128, 45,  64, {8, 1}, {4, 1}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056,  1, 193, {1, 1}, {4, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056,  1,  67, {1, 1}, {4, 1}, {0, 2, 1, 3}));
    for (auto bs : {1, 2, 4, 8}) {
        for (auto nr : {1, 4}) {
            for (uint32_t m = 0; m < 2; ++m) {
                for (uint32_t k = 0; k < 2; ++k) {
                    for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_F32}) {
                        test_cases.emplace_back(new test_mul_mat(type, GGML_TYPE_F32, 1056 + m, 1,  128 + k, {bs, 1}, {nr, 1}, {0, 2, 1, 3}));
                        test_cases.emplace_back(new test_mul_mat(type, GGML_TYPE_F32,  128 + m, 1, 1056 + k, {bs, 1}, {nr, 1}, {0, 1, 2, 3}, true));
                    }
                }
            }
        }
    }
    // the sycl backend limits the task global_range to < MAX_INT
    // test case for the f16-to-fp32 conversion kernel with large k under fp32 compute dtype (occurs in stable-diffusion)
    // however, this case needs to allocate more memory, which may fail on some devices (e.g. Intel Arc A770)
    // this case is verified to pass on Intel(R) Data Center GPU Max 1100 (sycl backend) and NV A30 (cuda backend)
    // test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F16, 512, 262144, 9216, {1, 1}, {1, 1}));

    // test large experts*tokens
    for (bool b : {false, true}) {
        test_cases.emplace_back(new test_mul_mat_id(GGML_TYPE_F16, GGML_TYPE_F32, 16, 16, b, 32, 1024, 16));
    }
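    // indirect matrix multiplication (MUL_MAT_ID) as used by mixture-of-experts models:
    // n_mats experts in total, n_used selected per row; b presumably toggles broadcasting of the second operand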
    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4, 8}) {
                for (int n_used : {1, 2, 4}) {
                    for (bool b : {false, true}) {
                        for (int n : {1, 32, 129}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }
    for (ggml_type type_a : other_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {4}) {
                for (int n_used : {2}) {
                    for (bool b : {false}) {
                        for (int n : {1, 32}) {
                            int m = 512;
                            int k = 256;
                            test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k));
                        }
                    }
                }
            }
        }
    }
    for (ggml_type type_a : base_types) {
        for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
            for (int n : {1, 16}) {
                for (int k : {1, 16}) {
                    for (int bs2 : {1, 3}) {
                        for (int bs3 : {1, 3}) {
                            for (int nr2 : {1, 2}) {
                                for (int nr3 : {1, 2}) {
                                    test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, n, k, {bs2, bs3}, {nr2, nr3}));
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) {
        test_cases.emplace_back(new test_sqr  (type));
        test_cases.emplace_back(new test_sqrt (type));
        test_cases.emplace_back(new test_log  (type));
        test_cases.emplace_back(new test_sin  (type));
        test_cases.emplace_back(new test_cos  (type));
        test_cases.emplace_back(new test_clamp(type));
    }
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 3, 1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 3, 2}, 5));
#if 0
    std::uniform_int_distribution<> dist_ne1(1, 50);
    int exponent = 1;
    while (exponent < (1 << 17)) {
        std::uniform_int_distribution<> dist_ne0(exponent, 2*exponent);
        for (int n = 0; n < 10; ++n) {
            int64_t ne0 = dist_ne0(rng);
            int64_t ne1 = dist_ne1(rng);
            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {ne0, ne1, 1, 1}, n/2 == 0, 0.1f, ne0 < 1000 ? 4.0f : 0.0f));
        }
        exponent <<= 1;
    }
#endif
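    // soft_max: sweep mask presence, mask precision (m_prec), scale and ALiBi max_bias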
    for (bool mask : {false, true}) {
        for (float max_bias : {0.0f, 8.0f}) {
            if (!mask && max_bias > 0.0f) continue;
            for (float scale : {1.0f, 0.1f}) {
                for (int64_t ne0 : {16, 1024}) {
                    for (int64_t ne1 : {16, 1024}) {
                        if (mask) {
                            for (ggml_type m_prec : {GGML_TYPE_F32, GGML_TYPE_F16}) {
                                test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0,   ne1,   1, 1}, mask, m_prec, {1, 1}, scale, max_bias));
                                test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, m_prec, {1, 1}, scale, max_bias));
                                if (ne0 <= 32 && ne1 <= 32) {
                                    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0,   ne1,   1, 1}, mask, m_prec, {3, 1}, scale, max_bias));
                                    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, m_prec, {2, 3}, scale, max_bias));
                                }
                            }
                        } else {
                            // the mask precision is irrelevant here, since the mask itself is disabled
                            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0,   ne1,   1, 1}, mask, GGML_TYPE_F32, {1, 1}, scale, max_bias));
                            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, GGML_TYPE_F32, {1, 1}, scale, max_bias));
                        }
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true,  GGML_TYPE_F32, {1, 1}, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true,  GGML_TYPE_F16, {1, 1}, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, GGML_TYPE_F32, {1, 1}, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  GGML_TYPE_F32, {1, 1}, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  GGML_TYPE_F16, {1, 1}, 0.1f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  GGML_TYPE_F32, {1, 1}, 0.1f, 8.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true,  GGML_TYPE_F16, {1, 1}, 0.1f, 8.0f));
    for (float max_bias : {0.0f, 8.0f}) {
        for (float scale : {1.0f, 0.1f}) {
            for (int64_t ne0 : {16, 1024}) {
                for (int64_t ne1 : {16, 1024}) {
                    test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0,   ne1,   1, 1}, scale, max_bias));
                    test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, scale, max_bias));
                }
            }
        }
    }
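    // RoPE: fs = freq_scale, ef = ext_factor, af = attn_factor; v != 0 presumably applies the op to a non-contiguous view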
    for (bool fw : {true, false}) { // fw == forward
        bool all = true;

        for (float v : { 0, 1 }) {
            for (float fs : { 1.0f, 1.4245f }) {
                for (float ef : { 0.0f, 0.7465f }) {
                    for (float af : { 1.0f, 1.4245f }) {
                        for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
                            for (bool ff : {false, true}) { // freq_factors
                                test_cases.emplace_back(new test_rope(type, {128, 32, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 7B

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, {128, 40, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 13B
                                    test_cases.emplace_back(new test_rope(type, {128, 52, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 30B
                                    test_cases.emplace_back(new test_rope(type, {128, 64, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 65B
                                }

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, { 64,  1, 2, 1},  64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64, 71, 2, 1},  64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 7B)
                                    test_cases.emplace_back(new test_rope(type, { 64,  8, 2, 1},  64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 40B)
                                    test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1},  20, 2, 512, fs, ef, af, ff, v, fw)); // neox (stablelm)
                                    test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1},  32, 2, 512, fs, ef, af, ff, v, fw)); // neox (phi-2)
                                }

                                if (all) {
                                    test_cases.emplace_back(new test_rope(type, {128, 12, 2, 1}, 128, GGML_ROPE_TYPE_MROPE,  512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl 2B)
                                    test_cases.emplace_back(new test_rope(type, {128, 28, 2, 1}, 128, GGML_ROPE_TYPE_MROPE,  512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl 7B)
                                    test_cases.emplace_back(new test_rope(type, { 80, 16, 2, 1},  80, GGML_ROPE_TYPE_VISION, 512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl ViT)
                                }

                                test_cases.emplace_back(new test_rope(type, { 64, 128, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 40B)
                            }
                        }

                        all = false;
                    }
                }
            }
        }
    }
    for (int v : { 0, 1, 2, 3 }) {
        for (int dim : { 0, 1, 2, 3, }) {
            test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v));
            test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v));
        }
    }
    for (ggml_sort_order order : {GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC}) {
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, { 8,  1,  1,  1}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {60, 10, 10, 10}, order)); // qwen
    }
    for (ggml_scale_mode mode : {GGML_SCALE_MODE_NEAREST, GGML_SCALE_MODE_BILINEAR}) {
        test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, {512, 512, 3, 2}, 2, mode));
        test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, {512, 512, 3, 2}, 2, mode, true));
        test_cases.emplace_back(new test_interpolate(GGML_TYPE_F32, {2, 5,  7, 11}, {5, 7, 11, 13}, mode));
        test_cases.emplace_back(new test_interpolate(GGML_TYPE_F32, {5, 7, 11, 13}, {2, 5,  7, 11}, mode));
    }
    test_cases.emplace_back(new test_interpolate(GGML_TYPE_F32, {2, 5, 7, 11}, {5, 7, 11, 13}, GGML_SCALE_MODE_BILINEAR | GGML_SCALE_FLAG_ALIGN_CORNERS));
    test_cases.emplace_back(new test_sum());
    test_cases.emplace_back(new test_sum_rows());
    test_cases.emplace_back(new test_mean());
    test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, {64, 64, 320, 1}));
    test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, {9, 9, 1280, 1}));
    test_cases.emplace_back(new test_acc());
    test_cases.emplace_back(new test_pad());
    test_cases.emplace_back(new test_pad_reflect_1d());
    test_cases.emplace_back(new test_arange());
    test_cases.emplace_back(new test_timestep_embedding());
    test_cases.emplace_back(new test_leaky_relu());
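    // flash attention: hsk/hsv = K/V head sizes, nh = number of heads, kv = KV sequence length, nb = query batch size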
    for (int hsk : { 64, 80, 128, 192, 256, 576 }) {
        for (int hsv : { 64, 80, 128, 192, 256, 512 }) {
            if (hsk != 192 && hsk != 576 && hsk != hsv) continue;
            if (hsk == 192 && (hsv != 128 && hsv != 192)) continue;
            if (hsk == 576 && hsv != 512) continue; // DeepSeek MLA
            for (bool mask : { true, false } ) {
                for (float max_bias : { 0.0f, 8.0f }) {
                    if (!mask && max_bias > 0.0f) continue;
                    for (float logit_softcap : {0.0f, 10.0f}) {
                        if (hsk != 128 && logit_softcap != 0.0f) continue;
                        for (int nh : { 4, }) {
                            for (int nr3 : { 1, 3, }) {
                                if (hsk > 64 && nr3 > 1) continue; // skip broadcast for large head sizes
                                for (int nr2 : { 1, 4, 16 }) {
                                    if (nr2 == 16 && hsk != 128) continue;
                                    for (int kv : { 512, 1024, }) {
                                        if (nr2 != 1 && kv != 512) continue;
                                        for (int nb : { 1, 3, 32, 35, }) {
                                            for (ggml_prec prec : {GGML_PREC_F32, GGML_PREC_DEFAULT}) {
                                                if (hsk != 128 && prec == GGML_PREC_DEFAULT) continue;
                                                for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) {
                                                    test_cases.emplace_back(new test_flash_attn_ext(
                                                        hsk, hsv, nh, {nr2, nr3}, kv, nb, mask, max_bias, logit_softcap, prec, type_KV));
                                                    // run a reduced set of test cases with permutation
                                                    if (mask == true && max_bias == 0.0f && logit_softcap == 0 && kv == 512) {
                                                        test_cases.emplace_back(new test_flash_attn_ext(
                                                            hsk, hsv, nh, {nr2, nr3}, kv, nb, mask, max_bias, logit_softcap, prec, type_KV, {0, 2, 1, 3}));
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    test_cases.emplace_back(new test_cross_entropy_loss     (GGML_TYPE_F32, {   10, 5, 4, 3}));
    test_cases.emplace_back(new test_cross_entropy_loss     (GGML_TYPE_F32, {30000, 1, 1, 1}));
    test_cases.emplace_back(new test_cross_entropy_loss_back(GGML_TYPE_F32, {   10, 5, 4, 3}));
    test_cases.emplace_back(new test_cross_entropy_loss_back(GGML_TYPE_F32, {30000, 1, 1, 1}));

    test_cases.emplace_back(new test_opt_step_adamw(GGML_TYPE_F32, {10, 5, 4, 3}));
#if 0
    // these tests are disabled to save execution time, but they can be handy for debugging
    test_cases.emplace_back(new test_llama(2, true));
    test_cases.emplace_back(new test_llama(1));
    test_cases.emplace_back(new test_llama(2));
    test_cases.emplace_back(new test_falcon(1));
    test_cases.emplace_back(new test_falcon(2));
#endif
    return test_cases;
}

// Test cases for performance evaluation: should be representative of real-world use cases
static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {
    std::vector<std::unique_ptr<test_case>> test_cases;

    test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {4096, 1, 1, 1}, {1,   1, 1, 1}));
    test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {4096, 1, 1, 1}, {1, 512, 1, 1}));

    test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F16, {512, 3072, 1, 1}));
    test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {8192, 512, 2, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {3072, 512, 2, 1}, {0, 2, 1, 3}));

    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096,  4096,  5, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {12888,  256,  5, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77,    4096,  5, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024,  1024, 10, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77,    1024, 10, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256,    256, 20, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64,      64, 20, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));
    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77,      64, 20, 1}, false, GGML_TYPE_F32, {1, 1}, 1.0f, 0.0f));

    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32,     10, 1, 1}));
    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024,   10, 1, 1}));
    test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1}));

    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 16416, 1, 128, {8, 1}, {4, 1}, {0, 2, 1, 3}));
    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 1, 16416, {8, 1}, {4, 1}, {0, 1, 2, 3}, true));

    for (int bs : {1, 2, 3, 4, 5, 8, 512}) {
        for (ggml_type type_a : all_types) {
            for (ggml_type type_b : {GGML_TYPE_F32}) {
                test_cases.emplace_back(new test_mul_mat(type_a, type_b, 4096, bs, 14336, {1, 1}, {1, 1}));
            }
        }
    }

    for (int K : {3, 5}) {
        for (int IC : {256, 2560}) {
            for (int IW_IH : {32, 64, 256}) {
                if (IC == 2560 && IW_IH == 256) {
                    // too big
                    continue;
                }
                test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {IW_IH, IW_IH, IC, 1}, {K, K, IC, 1}, 1, 1, 1, 1, 1, 1, true));
            }
        }
    }

    for (int kv : { 4096, 8192, 16384, }) {
        for (int hs : { 64, 128, }) {
            for (int nr : { 1, 4, }) {
                test_cases.emplace_back(new test_flash_attn_ext(hs, hs, 8, {nr, 1}, kv, 1, true, 0, 0, GGML_PREC_F32, GGML_TYPE_F16));
            }
        }
    }

    test_cases.emplace_back(new test_conv_2d_dw({512, 512, 256, 1}, {3, 3, 1, 256}, 1, 1, 1, false));
    test_cases.emplace_back(new test_conv_2d_dw({512, 512, 256, 1}, {3, 3, 1, 256}, 1, 1, 1, true));

    test_cases.emplace_back(new test_conv_transpose_2d({256, 256, 256, 1}, {3, 3, 16, 256}, 1));

    test_cases.emplace_back(new test_mean(GGML_TYPE_F32, {256, 256, 3, 1}));

    return test_cases;
}

static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name, const char * params_filter) {
    auto filter_test_cases = [](std::vector<std::unique_ptr<test_case>> & test_cases, const char * params_filter) {
        if (params_filter == nullptr) {
            return;
        }

        std::regex params_filter_regex(params_filter);

        for (auto it = test_cases.begin(); it != test_cases.end();) {
            if (!std::regex_search((*it)->vars(), params_filter_regex)) {
                it = test_cases.erase(it);
                continue;
            }
            it++;
        }
    };
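
    // e.g. a params filter such as "type_a=q4_0" (illustrative value) keeps only the
    // test cases whose vars() string matches the regex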
    if (mode == MODE_TEST) {
        auto test_cases = make_test_cases_eval();
        filter_test_cases(test_cases, params_filter);
        ggml_backend_t backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, NULL);
        if (backend_cpu == NULL) {
            printf("  Failed to initialize CPU backend\n");
            return false;
        }

        size_t n_ok = 0;
        for (auto & test : test_cases) {
            if (test->eval(backend, backend_cpu, op_name)) {
                n_ok++;
            }
        }
        printf("  %zu/%zu tests passed\n", n_ok, test_cases.size());

        ggml_backend_free(backend_cpu);

        return n_ok == test_cases.size();
    }
    if (mode == MODE_GRAD) {
        auto test_cases = make_test_cases_eval();
        filter_test_cases(test_cases, params_filter);
        size_t n_ok = 0;
        for (auto & test : test_cases) {
            if (test->eval_grad(backend, op_name)) {
                n_ok++;
            }
        }
        printf("  %zu/%zu tests passed\n", n_ok, test_cases.size());

        return n_ok == test_cases.size();
    }
    if (mode == MODE_PERF) {
        auto test_cases = make_test_cases_perf();
        filter_test_cases(test_cases, params_filter);
        for (auto & test : test_cases) {
            test->eval_perf(backend, op_name);
        }
        return true;
    }

    GGML_ABORT("fatal error");
}

static void usage(char ** argv) {
    printf("Usage: %s [mode] [-o <op>] [-b <backend>] [-p <params regex>]\n", argv[0]);
    printf("  valid modes:\n");
    printf("    - test (default, compare with CPU backend for correctness)\n");
    printf("    - grad (compare gradients from backpropagation with method of finite differences)\n");
    printf("    - perf (performance evaluation)\n");
    printf("  op names for -o are as given by ggml_op_desc() (e.g. ADD, MUL_MAT, etc)\n");
}
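
// example invocations (illustrative):
//   test-backend-ops test -o MUL_MAT -p "type_a=q4_0"
//   test-backend-ops perf -b CUDA0
// backend names for -b are as reported by ggml_backend_dev_name()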
int main(int argc, char ** argv) {
    test_mode mode = MODE_TEST;
    const char * op_name_filter = nullptr;
    const char * backend_filter = nullptr;
    const char * params_filter  = nullptr;

    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "test") == 0) {
            mode = MODE_TEST;
        } else if (strcmp(argv[i], "perf") == 0) {
            mode = MODE_PERF;
        } else if (strcmp(argv[i], "grad") == 0) {
            mode = MODE_GRAD;
        } else if (strcmp(argv[i], "-o") == 0) {
            if (i + 1 < argc) {
                op_name_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else if (strcmp(argv[i], "-b") == 0) {
            if (i + 1 < argc) {
                backend_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else if (strcmp(argv[i], "-p") == 0) {
            if (i + 1 < argc) {
                params_filter = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else {
            usage(argv);
            return 1;
        }
    }
    // load and enumerate backends
    ggml_backend_load_all();

    printf("Testing %zu devices\n\n", ggml_backend_dev_count());

    size_t n_ok = 0;

    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);

        printf("Backend %zu/%zu: %s\n", i + 1, ggml_backend_dev_count(), ggml_backend_dev_name(dev));

        if (backend_filter != NULL && strcmp(backend_filter, ggml_backend_dev_name(dev)) != 0) {
            printf("  Skipping\n");
            n_ok++;
            continue;
        }

        if (backend_filter == NULL && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU && mode != MODE_GRAD) {
            printf("  Skipping CPU backend\n");
            n_ok++;
            continue;
        }

        ggml_backend_t backend = ggml_backend_dev_init(dev, NULL);
        GGML_ASSERT(backend != NULL);

        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            // TODO: better value for n_threads
            ggml_backend_set_n_threads_fn(backend, std::thread::hardware_concurrency());
        }

        printf("  Device description: %s\n", ggml_backend_dev_description(dev));
        size_t free, total; // NOLINT
        ggml_backend_dev_memory(dev, &free, &total);
        printf("  Device memory: %zu MB (%zu MB free)\n", total / 1024 / 1024, free / 1024 / 1024);
        printf("\n");

        bool ok = test_backend(backend, mode, op_name_filter, params_filter);

        printf("  Backend %s: ", ggml_backend_name(backend));
        if (ok) {
            printf("\033[1;32mOK\033[0m\n");
            n_ok++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }

        printf("\n");

        ggml_backend_free(backend);
    }

    ggml_quantize_free();

    printf("%zu/%zu backends passed\n", n_ok, ggml_backend_dev_count());

    if (n_ok != ggml_backend_dev_count()) {
        printf("\033[1;31mFAIL\033[0m\n");
        return 1;
    }

    printf("\033[1;32mOK\033[0m\n");
    return 0;
}