test-backend-ops.cpp

#include <ggml.h>
#include <ggml-alloc.h>
#include <ggml-backend.h>
#include <ggml-backend-impl.h>

#include <algorithm>
#include <array>
#include <cfloat>
#include <cstring>
#include <functional>
#include <memory>
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thread>
#include <vector>
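
// fill a tensor with uniform random values in [min, max];
// f16 and quantized tensors are filled by quantizing a temporary f32 buffer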
static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
    size_t size = ggml_nelements(tensor);
    std::vector<float> data(size);

#if 0
    // single-threaded reference implementation (disabled)
    std::random_device rd;
    std::default_random_engine generator(rd());
    std::uniform_real_distribution<float> distribution(min, max);

    for (size_t i = 0; i < size; i++) {
        data[i] = distribution(generator);
    }
#endif

    // initialize the data in parallel, each thread with its own RNG
    auto init_thread = [&](size_t start, size_t end) {
        std::random_device rd;
        std::default_random_engine generator(rd());
        std::uniform_real_distribution<float> distribution(min, max);

        for (size_t i = start; i < end; i++) {
            data[i] = distribution(generator);
        }
    };

    size_t n_threads = std::thread::hardware_concurrency();
    std::vector<std::thread> threads;
    threads.reserve(n_threads);
    for (size_t i = 0; i < n_threads; i++) {
        size_t start = i*size/n_threads;
        size_t end   = (i+1)*size/n_threads;
        threads.emplace_back(init_thread, start, end);
    }
    for (auto & t : threads) {
        t.join();
    }

    if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
        ggml_backend_tensor_set(tensor, data.data(), 0, size * sizeof(float));
    } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16) {
        GGML_ASSERT(size % ggml_blck_size(tensor->type) == 0);
        std::vector<uint8_t> dataq(ggml_type_size(tensor->type)*size/ggml_blck_size(tensor->type));
        int64_t hist[16];
        ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size, hist);
        ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
    } else {
        GGML_ASSERT(false);
    }
}
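
// read a tensor back from the backend and convert all of its elements to f32,
// dequantizing block by block for quantized types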
static std::vector<float> tensor_to_float(const ggml_tensor * t) {
    std::vector<float> tv;
    tv.reserve(ggml_nelements(t));

    std::vector<uint8_t> buf(ggml_nbytes(t));
    ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));

    ggml_type_traits_t tt = ggml_internal_get_type_traits(t->type);
    size_t bs = ggml_blck_size(t->type);

    // access elements by index to avoid gaps in views
    for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
        for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
            for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
                for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
                    size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
                    if (t->type == GGML_TYPE_F16) {
                        tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
                    } else if (t->type == GGML_TYPE_F32) {
                        tv.push_back(*(float *) &buf[i]);
                    } else if (t->type == GGML_TYPE_I32) {
                        tv.push_back((float)*(int32_t *) &buf[i]);
                    } else if (ggml_is_quantized(t->type)) {
                        std::vector<float> vq(ggml_blck_size(t->type));
                        tt.to_float(&buf[i], vq.data(), ggml_blck_size(t->type));
                        tv.insert(tv.end(), vq.begin(), vq.end());
                    } else {
                        GGML_ASSERT(false);
                    }
                }
            }
        }
    }

    return tv;
}

/*
static double cosine_similarity(const float * v1, const float * v2, size_t n) {
    double dot = 0.0;
    double mag1 = 0.0;
    double mag2 = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return -1.0f;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        dot  += v1[i]*v2[i];
        mag1 += v1[i]*v1[i];
        mag2 += v2[i]*v2[i];
    }

    return dot/sqrt(mag1*mag2);
}

static float distance(const float * v1, const float * v2, size_t n) {
    double d = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v1[i]) || std::isnan(v2[i])) {
            return INFINITY;
        }
        if (std::isinf(v1[i]) && std::isinf(v2[i])) {
            continue;
        }
        d += (v1[i] - v2[i])*(v1[i] - v2[i]);
    }

    return sqrt(d);
}

static float vec_len(const float * v, size_t n) {
    double d = 0.0;

    for (size_t i = 0; i < n; i++) {
        if (std::isnan(v[i])) {
            return INFINITY;
        }
        if (std::isinf(v[i])) {
            continue;
        }
        d += v[i]*v[i];
    }

    return sqrt(d);
}
*/

// normalized mean squared error = mse(a, b) / mse(a, 0)
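// e.g. identical vectors give 0, and a uniform 1% relative error gives ~1e-4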
static double nmse(const float * a, const float * b, size_t n) {
    double mse_a_b = 0.0;
    double mse_a_0 = 0.0;

    for (size_t i = 0; i < n; i++) {
        float a_i = a[i];
        float b_i = b[i];

        mse_a_b += (a_i - b_i) * (a_i - b_i);
        mse_a_0 += a_i * a_i;
    }

    return mse_a_b / mse_a_0;
}

// utils for printing the variables of the test cases
#define VAR_TO_STR(x) (#x "=" + var_to_str(x))

template<typename T>
static std::string var_to_str(const T & x) {
    return std::to_string(x);
}

template<typename T, size_t N>
static std::string var_to_str(const T (&x)[N]) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

template<typename T, size_t N>
static std::string var_to_str(const std::array<T, N> & x) {
    std::string s = "[";
    for (size_t i = 0; i < N; i++) {
        if (i > 0) {
            s += ",";
        }
        s += var_to_str(x[i]);
    }
    s += "]";
    return s;
}

//static std::string var_to_str(ggml_unary_op unary_op) {
//    return ggml_unary_op_name(unary_op);
//}

static std::string var_to_str(ggml_type type) {
    return ggml_type_name(type);
}

#define VARS_TO_STR1(a) VAR_TO_STR(a)
#define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
#define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
#define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
#define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
#define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
#define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
#define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
#define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k) VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
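// e.g. VARS_TO_STR3(type, ne, eps) expands to a string such as
//      "type=f32,ne=[64,10,10,10],eps=0.000001"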

// accept FLT_MAX as infinity
static bool isinf_or_max(float f) {
    return std::isinf(f) || f == FLT_MAX || f == -FLT_MAX;
}

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

enum test_mode {
    MODE_TEST,
    MODE_PERF,
};
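
// base class for all op tests:
//   build_graph() constructs the op to be tested,
//   eval() compares the op results of two backends element by element,
//   eval_perf() measures the throughput of the op on a single backend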
struct test_case {
    virtual ~test_case() {}

    virtual std::string op_desc(ggml_tensor * t) {
        return ggml_op_desc(t);
    }

    virtual std::string vars() {
        return "";
    }

    virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;

    virtual double max_nmse_err() {
        return 1e-7;
    }

    virtual void initialize_tensors(ggml_context * ctx) {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
            init_tensor_uniform(t);
        }
    }

    virtual size_t op_size(ggml_tensor * t) {
        size_t size = ggml_nbytes(t);
        // add source tensors
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            if (t->src[i] != NULL) {
                size += ggml_nbytes(t->src[i]);
            }
        }
        return size;
    }

    ggml_cgraph * gf = nullptr;

    static const int sentinel_size = 1024;

    test_mode mode;

    std::vector<ggml_tensor *> sentinels;
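
    // sentinels are small tensors allocated between the graph tensors;
    // if an op writes out of bounds, the two backends end up with different
    // sentinel contents and the comparison callback reports a mismatch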
    void add_sentinel(ggml_context * ctx) {
        if (mode == MODE_PERF) {
            return;
        }
        ggml_tensor * sentinel = ::ggml_new_tensor_1d(ctx, GGML_TYPE_F32, sentinel_size);
        ggml_format_name(sentinel, "sent_%zu", sentinels.size());
        sentinels.push_back(sentinel);
    }

    // hijack ggml_new_tensor to add sentinels after each tensor to check for overflows in the backend
    ggml_tensor * ggml_new_tensor(ggml_context * ctx, ggml_type type, int n_dims, const int64_t * ne) {
        ggml_tensor * t = ::ggml_new_tensor(ctx, type, n_dims, ne);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_1d(ggml_context * ctx, ggml_type type, int64_t ne0) {
        ggml_tensor * t = ::ggml_new_tensor_1d(ctx, type, ne0);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_2d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1) {
        ggml_tensor * t = ::ggml_new_tensor_2d(ctx, type, ne0, ne1);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_3d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) {
        ggml_tensor * t = ::ggml_new_tensor_3d(ctx, type, ne0, ne1, ne2);
        add_sentinel(ctx);
        return t;
    }

    ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
        ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
        add_sentinel(ctx);
        return t;
    }

    bool eval(ggml_backend_t backend1, ggml_backend_t backend2, const char * op_name) {
        mode = MODE_TEST;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        gf = ggml_new_graph(ctx);

        // pre-graph sentinel
        add_sentinel(ctx);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf("  %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        printf("  %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if backends support op
        for (ggml_backend_t backend : {backend1, backend2}) {
            if (!ggml_backend_supports_op(backend, out)) {
                printf("not supported\n");
                ggml_free(ctx);
                return true;
            }
        }

        // post-graph sentinel
        add_sentinel(ctx);

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);

        // build graph
        ggml_build_forward_expand(gf, out);

        // add sentinels as graph nodes so that they are checked in the callback
        for (ggml_tensor * sentinel : sentinels) {
            gf->nodes[gf->n_nodes++] = sentinel;
        }

        // randomize tensors
        initialize_tensors(ctx);

        // compare
        struct callback_userdata {
            bool   ok;
            double max_err;
        };

        callback_userdata ud {
            true,
            max_nmse_err(),
        };

        auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
            callback_userdata * ud = (callback_userdata *) user_data;

            if (t1->op == GGML_OP_NONE) {
                // sentinels must be unchanged
                std::vector<uint8_t> t1_data(ggml_nbytes(t1));
                std::vector<uint8_t> t2_data(ggml_nbytes(t2));
                ggml_backend_tensor_get(t1, t1_data.data(), 0, ggml_nbytes(t1));
                ggml_backend_tensor_get(t2, t2_data.data(), 0, ggml_nbytes(t2));

                if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
                    printf("sentinel mismatch: %s ", t1->name);
                    ud->ok = false;
                    return true;
                }
            }

            std::vector<float> f1 = tensor_to_float(t1);
            std::vector<float> f2 = tensor_to_float(t2);

            for (size_t i = 0; i < f1.size(); i++) {
                // check for nans
                if (std::isnan(f1[i]) || std::isnan(f2[i])) {
                    printf("[%s] NaN at index %zu (%f %f) ", ggml_op_desc(t1), i, f1[i], f2[i]);
                    ud->ok = false;
                    return true;
                }
                // check for infs: both must be inf of the same sign, or both must be finite
                if (isinf_or_max(f1[i]) || isinf_or_max(f2[i])) {
                    if (isinf_or_max(f1[i]) && isinf_or_max(f2[i])) {
                        if (std::signbit(f1[i]) != std::signbit(f2[i])) {
                            printf("[%s] inf sign mismatch: %f %f ", ggml_op_desc(t1), f1[i], f2[i]);
                            ud->ok = false;
                            return true;
                        }
                    } else {
                        printf("[%s] inf mismatch: %f %f ", ggml_op_desc(t1), f1[i], f2[i]);
                        ud->ok = false;
                        return true;
                    }
                }
            }

            double err = nmse(f1.data(), f2.data(), f1.size());
            if (err > ud->max_err) {
                printf("[%s] NMSE = %f ", ggml_op_desc(t1), err);
                //for (int i = 0; i < f1.size(); i++) {
                //    printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
                //}
                //printf("\n");
                //exit(1);
                ud->ok = false;
            }
            return true;

            GGML_UNUSED(index);
        };

        ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);

        if (ud.ok) {
            printf("\033[1;32mOK\033[0m\n");
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }

        ggml_backend_buffer_free(buf);

        ggml_free(ctx);

        return ud.ok;
    }

    bool eval_perf(ggml_backend_t backend, const char * op_name) {
        mode = MODE_PERF;

        static const size_t graph_nodes = 8192;

        ggml_init_params params = {
            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
            /* .mem_base = */ NULL,
            /* .no_alloc = */ true,
        };
        ggml_context * ctx = ggml_init(params);

        ggml_tensor * out = build_graph(ctx);

        if (op_name != nullptr && op_desc(out) != op_name) {
            //printf("  %s: skipping\n", op_desc(out).c_str());
            ggml_free(ctx);
            return true;
        }

        int len = printf("  %s(%s): ", op_desc(out).c_str(), vars().c_str());
        fflush(stdout);

        // check if backends support op
        if (!ggml_backend_supports_op(backend, out)) {
            printf("not supported\n");
            ggml_free(ctx);
            return true;
        }

        // align while also leaving some margin for variations in parameters
        int align = 20;
        int last = (len + align - 1) / align * align;
        if (last - len < 5) {
            last += align;
        }
        last = std::max(last, 60);
        printf("%*s", last - len, "");

        // allocate
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

        // randomize tensors
        initialize_tensors(ctx);

        // build graph
        ggml_cgraph * gf = ggml_new_graph_custom(ctx, graph_nodes, false);
        ggml_build_forward_expand(gf, out);

        // warmup run
        ggml_backend_graph_compute(backend, gf);

        // duplicate the op
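        // the output node is appended to the graph repeatedly until roughly
        // target_size bytes would be moved, so that per-graph overhead is
        // amortized over many runs of the op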
        size_t target_size = ggml_backend_is_cpu(backend) ? 1ULL << 33 : 1ULL << 35; // 8 GB CPU, 32 GB GPU
        int n_runs = std::min((size_t)gf->size - gf->n_nodes, target_size / op_size(out)) + 1;
        for (int i = 1; i < n_runs; i++) {
            gf->nodes[gf->n_nodes++] = out;
        }

        // calculate memory
        size_t mem = n_runs * op_size(out);
        auto tensor_op_size = [](ggml_tensor * t) {
            size_t size = ggml_nbytes(t);
            // add source tensors
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                if (t->src[i] != NULL) {
                    size += ggml_nbytes(t->src[i]);
                }
            }
            return size;
        };
        for (int i = 0; i < gf->n_nodes; i++) {
            if (ggml_is_view_op(gf->nodes[i]->op) || gf->nodes[i] == out) {
                continue;
            }
            mem += tensor_op_size(gf->nodes[i]);
        }
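
        // the GB/s figure reported below is this byte total divided by the
        // wall time of the whole graph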
        // run
        ggml_backend_synchronize(backend);

        int64_t start_time = ggml_time_us();
        ggml_backend_graph_compute(backend, gf);
        ggml_backend_synchronize(backend);
        int64_t end_time = ggml_time_us();
        double time_us = end_time - start_time;

        printf("    %5d runs - %8.2f us/run - %8zu kB/run - \033[1;34m%7.2f GB/s\033[0m\n",
            n_runs,
            time_us / n_runs,
            op_size(out) / 1024,
            mem / (time_us/1e6) / 1024.0 / 1024.0 / 1024.0);

        ggml_backend_buffer_free(buf);

        ggml_free(ctx);

        return true;
    }
};

// GGML_OP_UNARY
struct test_unary : public test_case {
    const ggml_unary_op op;
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_unary(ggml_unary_op op,
            ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {128, 10, 10, 10})
        : op(op), type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * in = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_unary(ctx, in, op);
        return out;
    }
};

// GGML_OP_GET_ROWS
struct test_get_rows : public test_case {
    const ggml_type type;
    const int n; // cols
    const int m; // rows
    const int r; // rows to get
    const int b; // batch size
    const bool v; // view (non-contiguous src1)

    std::string vars() override {
        return VARS_TO_STR6(type, n, m, r, b, v);
    }

    test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
        : type(type), n(n), m(m), r(r), b(b), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * in = ggml_new_tensor_3d(ctx, type, n, m, b);
        ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
        if (v) {
            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
        }
        ggml_tensor * out = ggml_get_rows(ctx, in, rows);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // rows
                std::vector<int> data(r*b);
                for (int i = 0; i < r*b; i++) {
                    data[i] = rand() % m;
                }
                ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_REPEAT
struct test_repeat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 2;
    }

    test_repeat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            std::array<int, 4> nr = {2, 2, 2, 2})
        : type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * target = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_repeat(ctx, src, target);
        return out;
    }
};

// GGML_OP_DUP
struct test_dup : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_dup(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_dup(ctx, src);
        return out;
    }
};

// GGML_OP_CPY
struct test_cpy : public test_case {
    const ggml_type type_src;
    const ggml_type type_dst;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR3(type_src, type_dst, ne);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
    }

    test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type_src(type_src), type_dst(type_dst), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
        ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, ne.data());
        ggml_tensor * out = ggml_cpy(ctx, src, dst);
        return out;
    }
};

// GGML_OP_CONT
struct test_cont : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_cont(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
        src = ggml_transpose(ctx, src);
        ggml_tensor * out = ggml_cont(ctx, src);
        return out;
    }
};

// GGML_OP_ADD
// GGML_OP_MUL
// GGML_OP_DIV
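// in these tests, a has shape ne*nr and b has shape ne, so b is broadcast
// across a by the per-dimension repeat factors in nr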
struct test_bin_bcast : public test_case {
    using op_t = ggml_tensor * (*) (ggml_context *, ggml_tensor *, ggml_tensor *);
    op_t op;
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const std::array<int, 4> nr;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, nr);
    }

    size_t op_size(ggml_tensor * t) override {
        return ggml_nbytes(t) * 3;
    }

    test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 1, 1},
            std::array<int, 4> nr = {1, 2, 1, 1})
        : op(op), type(type), ne(ne), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = op(ctx, a, b);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (op == ggml_div) {
                // avoid division by zero
                init_tensor_uniform(t, 1.0f, 2.0f);
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_SCALE
struct test_scale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_scale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * scale = ggml_new_tensor_1d(ctx, type, 1);
        ggml_tensor * out = ggml_scale(ctx, a, scale);
        return out;
    }
};

// GGML_OP_NORM
struct test_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_norm(ctx, a, eps);
        return out;
    }
};

// GGML_OP_RMS_NORM
struct test_rms_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float eps;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, eps);
    }

    test_rms_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 10, 10, 10},
            float eps = 1e-6f)
        : type(type), ne(ne), eps(eps) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_rms_norm(ctx, a, eps);
        return out;
    }
};

// GGML_OP_MUL_MAT
struct test_mul_mat : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int64_t m;
    const int64_t n;
    const int64_t k;
    const std::array<int64_t, 2> bs; // dims 3 and 4
    const std::array<int64_t, 2> nr; // repeat in dims 3 and 4

    std::string vars() override {
        return VARS_TO_STR7(type_a, type_b, m, n, k, bs, nr);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[0]) * n * nr[0] * nr[1];
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int64_t m = 32, int64_t n = 32, int64_t k = 32,
            std::array<int64_t, 2> bs = {10, 10},
            std::array<int64_t, 2> nr = {2, 2})
        : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
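        // a has batch dims (bs[0], bs[1]) and b has (bs[0]*nr[0], bs[1]*nr[1]),
        // so the batches of a are broadcast across the extra batches of b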
        ggml_tensor * a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0],       bs[1]);
        ggml_tensor * b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]);
        ggml_tensor * out = ggml_mul_mat(ctx, a, b);
        return out;
    }
};

// GGML_OP_MUL_MAT_ID
struct test_mul_mat_id : public test_case {
    const ggml_type type_a;
    const ggml_type type_b;
    const int n_mats;
    const int id;
    const int64_t m;
    const int64_t n;
    const int64_t k;
    const bool v; // view (non-contiguous ids)

    std::string vars() override {
        return VARS_TO_STR8(type_a, type_b, n_mats, id, m, n, k, v);
    }

    double max_nmse_err() override {
        return 5e-4;
    }

    size_t op_size(ggml_tensor * t) override {
        size_t a = ggml_nbytes(t->src[2]) * n;
        size_t b = ggml_nbytes(t->src[1]) * m;
        size_t c = ggml_nbytes(t);
        return a + b + c;

        GGML_UNUSED(t);
    }

    test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32,
            int n_mats = 2, int id = 0,
            int64_t m = 32, int64_t n = 32, int64_t k = 32, bool v = false)
        : type_a(type_a), type_b(type_b), n_mats(n_mats), id(id),
          m(m), n(n), k(k), v(v) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        // C^T = A * B^T: (k, m) * (k, n) => (m, n)
        std::vector<ggml_tensor *> mats;
        for (int i = 0; i < n_mats; i++) {
            ggml_tensor * a = ggml_new_tensor_2d(ctx, type_a, k, m);
            mats.push_back(a);
        }
        ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n);
        if (v) {
            ids = ggml_view_2d(ctx, ids, n_mats/2, ids->ne[1], ids->nb[1], 0);
        }
        ggml_tensor * b = ggml_new_tensor_2d(ctx, type_b, k, n);
        ggml_tensor * out = ggml_mul_mat_id(ctx, mats.data(), n_mats, ids, v ? id/2 : id, b);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                if (ggml_is_view_op(t->op)) { continue; }
                // ids
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<int32_t> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i % n_mats;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(int32_t));
                }
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_SQR
struct test_sqr : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sqr(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sqr(ctx, a);
        return out;
    }
};

// GGML_OP_CLAMP
struct test_clamp : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    float min;
    float max;

    std::string vars() override {
        return VARS_TO_STR4(type, ne, min, max);
    }

    test_clamp(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            float min = -0.5f, float max = 0.5f)
        : type(type), ne(ne), min(min), max(max) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_clamp(ctx, a, min, max);
        return out;
    }
};

// GGML_OP_DIAG_MASK_INF
struct test_diag_mask_inf : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int n_past;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, n_past);
    }

    test_diag_mask_inf(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            int n_past = 5)
        : type(type), ne(ne), n_past(n_past) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_diag_mask_inf(ctx, a, n_past);
        return out;
    }
};

// GGML_OP_SOFT_MAX
struct test_soft_max : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_soft_max(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_soft_max(ctx, a);
        return out;
    }
};

// GGML_OP_ROPE
struct test_rope : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    int n_dims;
    int mode;
    int n_ctx;

    std::string vars() override {
        return VARS_TO_STR5(type, ne, n_dims, mode, n_ctx);
    }

    test_rope(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 1},
            int n_dims = 10, int mode = 0, int n_ctx = 512)
        : type(type), ne(ne), n_dims(n_dims), mode(mode), n_ctx(n_ctx) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne[2]);
        ggml_tensor * out = ggml_rope(ctx, a, pos, n_dims, mode, n_ctx);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // pos
                std::vector<int> data(ne[2]);
                for (int i = 0; i < ne[2]; i++) {
                    data[i] = rand() % n_ctx;
                }
                ggml_backend_tensor_set(t, data.data(), 0, ne[2] * sizeof(int));
            } else {
                init_tensor_uniform(t);
            }
        }
    }
};

// GGML_OP_ALIBI
struct test_alibi : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    int n_past;
    int n_head;
    float bias_max;

    std::string vars() override {
        return VARS_TO_STR5(type, ne, n_past, n_head, bias_max);
    }

    test_alibi(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            int n_past = 512, int n_head = 10, float bias_max = 0.5f)
        : type(type), ne(ne), n_past(n_past), n_head(n_head), bias_max(bias_max) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_alibi(ctx, a, n_past, n_head, bias_max);
        return out;
    }
};

// GGML_OP_IM2COL
struct test_im2col : public test_case {
    const ggml_type type_input;
    const ggml_type type_kernel;
    const std::array<int64_t, 4> ne_input;
    const std::array<int64_t, 4> ne_kernel;
    // stride
    const int s0;
    const int s1;
    // padding
    const int p0;
    const int p1;
    // dilation
    const int d0;
    const int d1;
    // mode
    const bool is_2D;

    std::string vars() override {
        return VARS_TO_STR11(type_input, type_kernel, ne_input, ne_kernel, s0, s1, p0, p1, d0, d1, is_2D);
    }

    test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16,
            std::array<int64_t, 4> ne_input = {10, 10, 3, 1},  // [input_width, input_height, input_channels, 1]
            std::array<int64_t, 4> ne_kernel = {3, 3, 3, 1},   // [kernel_width, kernel_height, input_channels, 1]
            int s0 = 1, int s1 = 1,
            int p0 = 1, int p1 = 1,
            int d0 = 1, int d1 = 1,
            bool is_2D = true)
        : type_input(type_input), type_kernel(type_kernel), ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), is_2D(is_2D) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data());
        ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data());
        ggml_tensor * out = ggml_im2col(ctx, kernel, input, s0, s1, p0, p1, d0, d1, is_2D);
        return out;
    }
};

// GGML_OP_CONCAT
struct test_concat : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int64_t b_ne2;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, b_ne2);
    }

    test_concat(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10},
            int64_t b_ne2 = 10)
        : type(type), ne(ne), b_ne2(b_ne2) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * b = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], b_ne2, ne[3]);
        ggml_tensor * out = ggml_concat(ctx, a, b);
        return out;
    }
};

// GGML_OP_ARGSORT
struct test_argsort : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    ggml_sort_order order;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, order);
    }

    test_argsort(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {16, 10, 10, 10},
            ggml_sort_order order = GGML_SORT_ASC)
        : type(type), ne(ne), order(order) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_argsort(ctx, a, order);
        return out;
    }

    void initialize_tensors(ggml_context * ctx) override {
        std::random_device rd;
        std::default_random_engine rng(rd());
        for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
            if (t->type == GGML_TYPE_I32) {
                // indices
                std::vector<int> data(ggml_nelements(t));
                for (int i = 0; i < ggml_nelements(t); i++) {
                    data[i] = rand();
                }
                std::shuffle(data.begin(), data.end(), rng);
                ggml_backend_tensor_set(t, data.data(), 0, ne[0]*ne[1]*ne[2]*ne[3] * sizeof(int));
            } else if (t->type == GGML_TYPE_F32) {
                // initialize with unique values to avoid ties
                for (int64_t r = 0; r < ggml_nrows(t); r++) {
                    std::vector<float> data(t->ne[0]);
                    for (int i = 0; i < t->ne[0]; i++) {
                        data[i] = i;
                    }
                    std::shuffle(data.begin(), data.end(), rng);
                    ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float));
                }
            } else {
                GGML_ASSERT(false);
            }
        }
    }
};

// GGML_OP_SUM_ROWS
struct test_sum_rows : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;

    std::string vars() override {
        return VARS_TO_STR2(type, ne);
    }

    test_sum_rows(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 10, 10, 10})
        : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_sum_rows(ctx, a);
        return out;
    }
};

// GGML_OP_UPSCALE
struct test_upscale : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t scale_factor;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, scale_factor);
    }

    test_upscale(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {512, 512, 3, 1},
            int32_t scale_factor = 2)
        : type(type), ne(ne), scale_factor(scale_factor) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_upscale(ctx, a, scale_factor);
        return out;
    }
};

// GGML_OP_GROUP_NORM
struct test_group_norm : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne;
    const int32_t num_groups;

    std::string vars() override {
        return VARS_TO_STR3(type, ne, num_groups);
    }

    test_group_norm(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {64, 64, 320, 1},
            int32_t num_groups = 32)
        : type(type), ne(ne), num_groups(num_groups) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_tensor * out = ggml_group_norm(ctx, a, num_groups);
        return out;
    }
};

// GGML_OP_ACC
struct test_acc : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const std::array<int64_t, 4> ne_b;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, ne_b);
    }

    test_acc(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {1024, 577, 1, 1},
            std::array<int64_t, 4> ne_b = {1024, 576, 1, 1})
        : type(type), ne_a(ne_a), ne_b(ne_b) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne_b.data());
        ggml_tensor * out = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], b->nb[1]);
        return out;
    }
};

// GGML_OP_PAD
struct test_pad : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const int pad_0;
    const int pad_1;

    std::string vars() override {
        return VARS_TO_STR4(type, ne_a, pad_0, pad_1);
    }

    test_pad(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {512, 512, 1, 1},
            int pad_0 = 1, int pad_1 = 1)
        : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_pad(ctx, a, pad_0, pad_1, 0, 0);
        return out;
    }
};

// GGML_OP_LEAKY_RELU
struct test_leaky_relu : public test_case {
    const ggml_type type;
    const std::array<int64_t, 4> ne_a;
    const float negative_slope;

    std::string vars() override {
        return VARS_TO_STR3(type, ne_a, negative_slope);
    }

    test_leaky_relu(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne_a = {10, 10, 10, 10},
            float negative_slope = 0.1f)
        : type(type), ne_a(ne_a), negative_slope(negative_slope) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
        ggml_tensor * out = ggml_leaky_relu(ctx, a, negative_slope, true);
        return out;
    }
};

// Mixtral MOE
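// builds a complete Mixtral-style MoE feed-forward layer: the router
// (ffn_gate_inp) scores all experts per token, the top n_experts_per_tok
// experts are evaluated with ggml_mul_mat_id, and their outputs are combined
// using the normalized router weights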
struct test_moe : public test_case {
    const int n_experts;
    const int n_experts_per_tok;
    const int n_tokens;
    const int n_embd;
    const int n_ff;

    std::string op_desc(ggml_tensor * t) override {
        return "MOE";

        GGML_UNUSED(t);
    }

    std::string vars() override {
        return VARS_TO_STR5(n_experts, n_experts_per_tok, n_tokens, n_embd, n_ff);
    }

    test_moe(int n_experts = 8, int n_experts_per_tok = 2, int n_tokens = 1, int n_embd = 4096, int n_ff = 14336)
        : n_experts(n_experts), n_experts_per_tok(n_experts_per_tok), n_tokens(n_tokens), n_embd(n_embd), n_ff(n_ff) {
    }

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * ffn_gate_inp = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_experts);

        std::vector<ggml_tensor *> ffn_up_exp(n_experts);
        std::vector<ggml_tensor *> ffn_gate_exp(n_experts);
        std::vector<ggml_tensor *> ffn_down_exp(n_experts);
        for (int i = 0; i < n_experts; ++i) {
            ffn_up_exp[i]   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);
            ffn_gate_exp[i] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);
            ffn_down_exp[i] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff, n_embd);
        }

        ggml_tensor * cur = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens);

        ggml_tensor * logits = ggml_mul_mat(ctx, ffn_gate_inp, cur);
        ggml_tensor * probs = ggml_soft_max_ext(ctx, logits, nullptr, 1.0f/sqrtf(n_embd));

        // select experts
        ggml_tensor * selected_experts = ggml_top_k(ctx, probs, n_experts_per_tok);

        ggml_tensor * weights = ggml_get_rows(ctx,
                ggml_reshape_3d(ctx, probs, 1, n_experts, n_tokens), selected_experts);

        weights = ggml_reshape_2d(ctx, weights, n_experts_per_tok, n_tokens);

        ggml_tensor * weights_sum = ggml_sum_rows(ctx, weights);

        weights = ggml_div(ctx, weights, weights_sum);

        // compute expert outputs
        ggml_tensor * moe_out = nullptr;

        for (int i = 0; i < n_experts_per_tok; ++i) {
            ggml_tensor * cur_expert;

            ggml_tensor * cur_up = ggml_mul_mat_id(ctx, ffn_up_exp.data(), n_experts, selected_experts, i, cur);

            ggml_tensor * cur_gate = ggml_mul_mat_id(ctx, ffn_gate_exp.data(), n_experts, selected_experts, i, cur);

            cur_gate = ggml_silu(ctx, cur_gate);

            cur_expert = ggml_mul(ctx, cur_up, cur_gate);

            cur_expert = ggml_mul_mat_id(ctx, ffn_down_exp.data(), n_experts, selected_experts, i, cur_expert);

            cur_expert = ggml_mul(ctx, cur_expert,
                    ggml_view_2d(ctx, weights, 1, n_tokens, weights->nb[1], i*weights->nb[0]));

            if (i == 0) {
                moe_out = cur_expert;
            } else {
                moe_out = ggml_add(ctx, moe_out, cur_expert);
            }
        }

        cur = moe_out;

        return cur;
    }
};

static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
    std::vector<std::unique_ptr<test_case>> test_cases;

    const ggml_type all_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16,
        GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
        GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0,
        GGML_TYPE_Q2_K, GGML_TYPE_Q3_K,
        GGML_TYPE_Q4_K, GGML_TYPE_Q5_K,
        GGML_TYPE_Q6_K
    };

    // unary ops
    for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) {
        test_cases.emplace_back(new test_unary((ggml_unary_op) op));
    }

    test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false));
    for (ggml_type type : all_types) {
        for (int b : {1, 7}) {
            for (bool v : {false, true}) {
                test_cases.emplace_back(new test_get_rows(type, 256, 5, 4, b, v));
            }
        }
    }

    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {2, 1, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 2, 1, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 2, 1}));
    test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 2}));

    test_cases.emplace_back(new test_dup());

    for (ggml_type type : all_types) {
        test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, type, {256, 10, 10, 1}));
    }

    test_cases.emplace_back(new test_cont());

    auto add_test_bin_bcast = [&](ggml_type type, std::array<int64_t, 4> ne, std::array<int, 4> nr) {
        for (auto op : {ggml_add, ggml_mul, ggml_div}) {
            test_cases.emplace_back(new test_bin_bcast(op, type, ne, nr));
        }
    };

    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 8, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1, 1}, {32, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 320, 320}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 1, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 1, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {1, 2, 2, 2});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 10, 10, 10}, {2, 2, 2, 2});

    // stable diffusion
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 16, 16, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 16, 16, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1280, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {16, 16, 1280, 1}, {1, 1, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 2560, 1}, {16, 16, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1280, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 1920, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {1, 1, 640, 1}, {32, 32, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {5120, 1, 1, 1}, {1, 256, 1, 1});
    add_test_bin_bcast(GGML_TYPE_F32, {640, 1, 1, 1}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {1, 1, 1, 1});
    //add_test_bin_bcast(GGML_TYPE_F32, {3, 3, 2560, 1280}, {2, 1, 1, 1});

    test_cases.emplace_back(new test_scale());

    for (float eps : {1e-6f, 1e-5f, 1e-3f, 1e-1f}) {
        test_cases.emplace_back(new test_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
        test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
    }

    for (ggml_type type_a : all_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            // FIXME: CPU crashes on f16xf16
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, { 1,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10,  1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16,  1, 256, {10, 10}, {2, 2}));

            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10,  1}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10,  1}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 1}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 2}));
            test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 2}));
        }
    }

    for (ggml_type type_a : all_types) {
        for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
            for (int n_mats : {2, 4, 8}) {
                for (int id = 0; id < n_mats; id++) {
                    for (bool v : {false, true}) {
                        test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, id, 16, 16, 256, v));
                    }
                }
            }
        }
    }

    test_cases.emplace_back(new test_sqr());
    test_cases.emplace_back(new test_clamp());

    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10,  1,  1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10,  1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10, 10}, 5));

    test_cases.emplace_back(new test_soft_max());

    for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
        test_cases.emplace_back(new test_rope(type, {128,  32, 10, 1}, 128, 0, 512)); // llama 7B
        test_cases.emplace_back(new test_rope(type, {128,  40, 10, 1}, 128, 0, 512)); // llama 13B
        test_cases.emplace_back(new test_rope(type, {128,  52, 10, 1}, 128, 0, 512)); // llama 30B
        test_cases.emplace_back(new test_rope(type, {128,  64, 10, 1}, 128, 0, 512)); // llama 65B
        test_cases.emplace_back(new test_rope(type, { 64,   1, 10, 1},  64, 2, 512)); // neox (falcon 7B)
        test_cases.emplace_back(new test_rope(type, { 64,  71, 10, 1},  64, 2, 512)); // neox (falcon 7B)
        test_cases.emplace_back(new test_rope(type, { 64,   8, 10, 1},  64, 2, 512)); // neox (falcon 40B)
        test_cases.emplace_back(new test_rope(type, { 64, 128, 10, 1},  64, 2, 512)); // neox (falcon 40B)
        test_cases.emplace_back(new test_rope(type, { 80,  32, 10, 1},  20, 2, 512)); // neox (stablelm)
    }

    test_cases.emplace_back(new test_alibi());
    test_cases.emplace_back(new test_im2col());
    test_cases.emplace_back(new test_concat());

    for (ggml_sort_order order : {GGML_SORT_ASC, GGML_SORT_DESC}) {
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {8, 1, 1, 1}, order));
        test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order));
    }

    test_cases.emplace_back(new test_sum_rows());
    test_cases.emplace_back(new test_upscale());
    test_cases.emplace_back(new test_group_norm());
    test_cases.emplace_back(new test_acc());
    test_cases.emplace_back(new test_pad());
    test_cases.emplace_back(new test_leaky_relu());

#if !defined(__SANITIZE_THREAD__)
    // FIXME: these tests use too much memory with thread sanitizer
    test_cases.emplace_back(new test_moe(8, 2, 1, 4096, 8*1024));
    //test_cases.emplace_back(new test_moe(8, 2, 8, 4096, 14336));
#endif

    // run tests
    if (mode == MODE_TEST) {
        ggml_backend_t backend_cpu = ggml_backend_cpu_init();

        size_t n_ok = 0;
        for (auto & test : test_cases) {
            if (test->eval(backend, backend_cpu, op_name)) {
                n_ok++;
            }
        }
        printf("  %zu/%zu tests passed\n", n_ok, test_cases.size());

        ggml_backend_free(backend_cpu);

        return n_ok == test_cases.size();
    }

    if (mode == MODE_PERF) {
        for (auto & test : test_cases) {
            test->eval_perf(backend, op_name);
        }
        return true;
    }

    GGML_ASSERT(false);
    return false;
}

static void usage(char ** argv) {
    printf("Usage: %s [mode] [-o op] [-b backend]\n", argv[0]);
    printf("  valid modes are: test (compare with CPU backend for correctness) or perf (performance evaluation)\n");
    printf("  op names are as given by ggml_op_desc()\n");
}
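
// example invocations (op names are those printed by ggml_op_desc(), e.g. MUL_MAT):
//   test-backend-ops test -o MUL_MAT
//   test-backend-ops perf -b CPU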
int main(int argc, char ** argv) {
    test_mode mode = MODE_TEST;
    const char * op_name = NULL;
    const char * backend = NULL;

    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "test") == 0) {
            mode = MODE_TEST;
        } else if (strcmp(argv[i], "perf") == 0) {
            mode = MODE_PERF;
        } else if (strcmp(argv[i], "-o") == 0) {
            if (i + 1 < argc) {
                op_name = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else if (strcmp(argv[i], "-b") == 0) {
            if (i + 1 < argc) {
                backend = argv[++i];
            } else {
                usage(argv);
                return 1;
            }
        } else {
            usage(argv);
            return 1;
        }
    }

    // enumerate backends
    printf("Testing %zu backends\n\n", ggml_backend_reg_get_count());

    size_t n_ok = 0;

    for (size_t i = 0; i < ggml_backend_reg_get_count(); i++) {
        printf("Backend %zu/%zu (%s)\n", i + 1, ggml_backend_reg_get_count(), ggml_backend_reg_get_name(i));

        if (backend != NULL && strcmp(backend, ggml_backend_reg_get_name(i)) != 0) {
            printf("  Skipping\n");
            n_ok++;
            continue;
        }

        ggml_backend_t backend = ggml_backend_reg_init_backend(i, NULL);
        GGML_ASSERT(backend != NULL);
        printf("  Backend name: %s\n", ggml_backend_name(backend));

        bool ok = test_backend(backend, mode, op_name);

        printf("  Backend %s: ", ggml_backend_name(backend));
        if (ok) {
            printf("\033[1;32mOK\033[0m\n");
            n_ok++;
        } else {
            printf("\033[1;31mFAIL\033[0m\n");
        }

        printf("\n");

        ggml_backend_free(backend);
    }

    printf("%zu/%zu backends passed\n", n_ok, ggml_backend_reg_get_count());
    if (n_ok != ggml_backend_reg_get_count()) {
        printf("\033[1;31mFAIL\033[0m\n");
        return 1;
    }
    printf("\033[1;32mOK\033[0m\n");
    return 0;
}