// Vectorized functions for fundamental operations

#pragma once

#include "ggml-impl.h"
#include "simd-mappings.h"

#include "ggml.h"
#include "ggml-cpu.h"

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#endif

// floating point type used to accumulate sums
typedef double ggml_float;

#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32

#ifdef __cplusplus
extern "C" {
#endif

//
// global data
//

// precomputed gelu table for f16 (128 KB)
extern ggml_fp16_t ggml_table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
extern ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];

//
// fundamental operations
//
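// each ggml_vec_dot_* writes s = dot(x, y) over n elements; bs, bx and by are
// byte strides between consecutive results/rows and nrc is the number of
// results computed per call (typically 1, in which case the strides are unused)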
void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * GGML_RESTRICT x, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc);
void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc);
void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx, ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc);

void ggml_vec_silu_f32(const int n, float * y, const float * x);
ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max);
ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, float max);

inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_cpy_i32(const int n, int32_t * y, const int32_t * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }

inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const ggml_fp16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
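// vectorized add: the AVX2 loop handles 8 floats per iteration and the scalar
// loop that follows picks up the n % 8 leftovers; the same main-loop plus
// scalar-tail pattern recurs throughout this file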
inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) {
    int i = 0;
#if defined(__AVX2__)
    for (; i + 7 < n; i += 8) {
        __m256 vx = _mm256_loadu_ps(x + i);
        __m256 vy = _mm256_loadu_ps(y + i);
        __m256 vz = _mm256_add_ps(vx, vy);
        _mm256_storeu_ps(z + i, vz);
    }
#endif
    for (; i < n; ++i) {
        z[i] = x[i] + y[i];
    }
}

inline static void ggml_vec_add_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) {
    for (int i = 0; i < n; ++i) {
        z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) + GGML_CPU_FP16_TO_FP32(y[i]));
    }
}

inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
inline static void ggml_vec_sub_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) {
    for (int i = 0; i < n; ++i) {
        z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) - GGML_CPU_FP16_TO_FP32(y[i]));
    }
}

inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
inline static void ggml_vec_neg_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(-GGML_CPU_FP16_TO_FP32(x[i]));
    }
}

inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
inline static void ggml_vec_mul_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) {
    for (int i = 0; i < n; ++i) {
        z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) * GGML_CPU_FP16_TO_FP32(y[i]));
    }
}
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
inline static void ggml_vec_div_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) {
    for (int i = 0; i < n; ++i) {
        z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) / GGML_CPU_FP16_TO_FP32(y[i]));
    }
}

// compute GGML_VEC_DOT_UNROLL dot products at once
// xs - x row stride in bytes
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * GGML_RESTRICT s, void * GGML_RESTRICT xv, ggml_fp16_t * GGML_RESTRICT y) {
    ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };

    ggml_fp16_t * GGML_RESTRICT x[GGML_VEC_DOT_UNROLL];

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
    }

#if defined(GGML_SIMD)
#if defined(__ARM_FEATURE_SVE)
    const int sve_register_length = svcntb() * 8;
    const int ggml_f16_epr = sve_register_length / 16; // number of f16 elements per SVE register
    const int ggml_f16_step = 8 * ggml_f16_epr; // choose 8 SVE registers

    const int np = (n & ~(ggml_f16_step - 1));
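    // eight independent f16 accumulators, four per dot product, keep the FMA
    // pipeline busy; np is n rounded down to a multiple of the 8-register step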
    svfloat16_t sum_00 = svdup_n_f16(0.0f);
    svfloat16_t sum_01 = svdup_n_f16(0.0f);
    svfloat16_t sum_02 = svdup_n_f16(0.0f);
    svfloat16_t sum_03 = svdup_n_f16(0.0f);

    svfloat16_t sum_10 = svdup_n_f16(0.0f);
    svfloat16_t sum_11 = svdup_n_f16(0.0f);
    svfloat16_t sum_12 = svdup_n_f16(0.0f);
    svfloat16_t sum_13 = svdup_n_f16(0.0f);

    svfloat16_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8;
    svfloat16_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8;

    for (int i = 0; i < np; i += ggml_f16_step) {
        ay1 = GGML_F16x_VEC_LOAD(y + i + 0 * ggml_f16_epr, 0); // 8 elements
        ax1 = GGML_F16x_VEC_LOAD(x[0] + i + 0*ggml_f16_epr, 0); // 8 elements
        sum_00 = GGML_F16x_VEC_FMA(sum_00, ax1, ay1); // sum_00 = sum_00 + ax1*ay1
        ax1 = GGML_F16x_VEC_LOAD(x[1] + i + 0*ggml_f16_epr, 0); // 8 elements
        sum_10 = GGML_F16x_VEC_FMA(sum_10, ax1, ay1);

        ay2 = GGML_F16x_VEC_LOAD(y + i + 1 * ggml_f16_epr, 1); // next 8 elements
        ax2 = GGML_F16x_VEC_LOAD(x[0] + i + 1*ggml_f16_epr, 1); // next 8 elements
        sum_01 = GGML_F16x_VEC_FMA(sum_01, ax2, ay2);
        ax2 = GGML_F16x_VEC_LOAD(x[1] + i + 1*ggml_f16_epr, 1);
        sum_11 = GGML_F16x_VEC_FMA(sum_11, ax2, ay2);

        ay3 = GGML_F16x_VEC_LOAD(y + i + 2 * ggml_f16_epr, 2);
        ax3 = GGML_F16x_VEC_LOAD(x[0] + i + 2*ggml_f16_epr, 2);
        sum_02 = GGML_F16x_VEC_FMA(sum_02, ax3, ay3);
        ax3 = GGML_F16x_VEC_LOAD(x[1] + i + 2*ggml_f16_epr, 2);
        sum_12 = GGML_F16x_VEC_FMA(sum_12, ax3, ay3);

        ay4 = GGML_F16x_VEC_LOAD(y + i + 3 * ggml_f16_epr, 3);
        ax4 = GGML_F16x_VEC_LOAD(x[0] + i + 3*ggml_f16_epr, 3);
        sum_03 = GGML_F16x_VEC_FMA(sum_03, ax4, ay4);
        ax4 = GGML_F16x_VEC_LOAD(x[1] + i + 3*ggml_f16_epr, 3);
        sum_13 = GGML_F16x_VEC_FMA(sum_13, ax4, ay4);

        ay5 = GGML_F16x_VEC_LOAD(y + i + 4 * ggml_f16_epr, 4);
        ax5 = GGML_F16x_VEC_LOAD(x[0] + i + 4*ggml_f16_epr, 4);
        sum_00 = GGML_F16x_VEC_FMA(sum_00, ax5, ay5);
        ax5 = GGML_F16x_VEC_LOAD(x[1] + i + 4*ggml_f16_epr, 4);
        sum_10 = GGML_F16x_VEC_FMA(sum_10, ax5, ay5);

        ay6 = GGML_F16x_VEC_LOAD(y + i + 5 * ggml_f16_epr, 5);
        ax6 = GGML_F16x_VEC_LOAD(x[0] + i + 5*ggml_f16_epr, 5);
        sum_01 = GGML_F16x_VEC_FMA(sum_01, ax6, ay6);
        ax6 = GGML_F16x_VEC_LOAD(x[1] + i + 5*ggml_f16_epr, 5);
        sum_11 = GGML_F16x_VEC_FMA(sum_11, ax6, ay6);

        ay7 = GGML_F16x_VEC_LOAD(y + i + 6 * ggml_f16_epr, 6);
        ax7 = GGML_F16x_VEC_LOAD(x[0] + i + 6*ggml_f16_epr, 6);
        sum_02 = GGML_F16x_VEC_FMA(sum_02, ax7, ay7);
        ax7 = GGML_F16x_VEC_LOAD(x[1] + i + 6*ggml_f16_epr, 6);
        sum_12 = GGML_F16x_VEC_FMA(sum_12, ax7, ay7);

        ay8 = GGML_F16x_VEC_LOAD(y + i + 7 * ggml_f16_epr, 7);
        ax8 = GGML_F16x_VEC_LOAD(x[0] + i + 7*ggml_f16_epr, 7);
        sum_03 = GGML_F16x_VEC_FMA(sum_03, ax8, ay8);
        ax8 = GGML_F16x_VEC_LOAD(x[1] + i + 7*ggml_f16_epr, 7);
        sum_13 = GGML_F16x_VEC_FMA(sum_13, ax8, ay8);
    }
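    // drain the remaining full registers one at a time, then cover the last
    // n % ggml_f16_epr elements with a whilelt-predicated load and FMA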
    const int np2 = (n & ~(ggml_f16_epr - 1));
    for (int k = np; k < np2; k += ggml_f16_epr) {
        svfloat16_t ry = GGML_F16x_VEC_LOAD(y + k, 0);
        svfloat16_t rx = GGML_F16x_VEC_LOAD(x[0] + k, 0);
        sum_00 = GGML_F16x_VEC_FMA(sum_00, rx, ry);
        rx = GGML_F16x_VEC_LOAD(x[1] + k, 0);
        sum_10 = GGML_F16x_VEC_FMA(sum_10, rx, ry);
    }

    if (np2 < n) {
        svbool_t pg = svwhilelt_b16(np2, n);
        svfloat16_t hx_0 = svld1_f16(pg, (const __fp16 *)(x[0] + np2));
        svfloat16_t hx_1 = svld1_f16(pg, (const __fp16 *)(x[1] + np2));
        svfloat16_t hy = svld1_f16(pg, (const __fp16 *)(y + np2));
        sum_00 = svmad_f16_x(pg, hx_0, hy, sum_00);
        sum_10 = svmad_f16_x(pg, hx_1, hy, sum_10);
    }

    GGML_F16x_VEC_REDUCE(sumf[0], sum_00, sum_01, sum_02, sum_03);
    GGML_F16x_VEC_REDUCE(sumf[1], sum_10, sum_11, sum_12, sum_13);
#elif defined(__riscv_v_intrinsic)
    // todo: RVV impl
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i]));
        }
    }
#else
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
                ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);

                sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
            }
        }
    }

    // reduce sum0..sum3 to sum0
    for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i]));
        }
    }
#endif
#else
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i]));
        }
    }
#endif

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        s[i] = (float)sumf[i];
    }
}
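// y := y + x*v (multiply a row by a scalar and accumulate)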
inline static void ggml_vec_mad_f32(const int n, float * GGML_RESTRICT y, const float * GGML_RESTRICT x, const float v) {
#if defined(GGML_SIMD)
#if defined(__ARM_FEATURE_SVE)
    const int sve_register_length = ggml_cpu_get_sve_cnt() * 8;
    const int ggml_f32_epr = sve_register_length / 32; // f32 elements per register: SVE128:4, SVE256:8, SVE512:16
    const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    const int np = (n & ~(ggml_f32_step - 1));
    svfloat32_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8;
    svfloat32_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8;
    for (int i = 0; i < np; i += ggml_f32_step) {
        ax1 = GGML_F32_VEC_LOAD(x + i);
        ay1 = GGML_F32_VEC_LOAD(y + i);
        ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx);

        GGML_F32_VEC_STORE(y + i, ay1);

        ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr);
        ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr);
        ay2 = GGML_F32_VEC_FMA(ay2, ax2, vx);

        GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2);

        ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr);
        ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr);
        ay3 = GGML_F32_VEC_FMA(ay3, ax3, vx);

        GGML_F32_VEC_STORE(y + i + 2*ggml_f32_epr, ay3);

        ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr);
        ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr);
        ay4 = GGML_F32_VEC_FMA(ay4, ax4, vx);

        GGML_F32_VEC_STORE(y + i + 3*ggml_f32_epr, ay4);

        ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr);
        ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr);
        ay5 = GGML_F32_VEC_FMA(ay5, ax5, vx);

        GGML_F32_VEC_STORE(y + i + 4*ggml_f32_epr, ay5);

        ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr);
        ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr);
        ay6 = GGML_F32_VEC_FMA(ay6, ax6, vx);

        GGML_F32_VEC_STORE(y + i + 5*ggml_f32_epr, ay6);

        ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr);
        ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr);
        ay7 = GGML_F32_VEC_FMA(ay7, ax7, vx);

        GGML_F32_VEC_STORE(y + i + 6*ggml_f32_epr, ay7);

        ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr);
        ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr);
        ay8 = GGML_F32_VEC_FMA(ay8, ax8, vx);

        GGML_F32_VEC_STORE(y + i + 7*ggml_f32_epr, ay8);
    }
    // leftovers
    // since the loop above is unrolled 8x, leftovers lie in [0, ggml_f32_step) and are handled below
    const int np2 = (n & ~(ggml_f32_epr - 1));
    for (int i = np; i < np2; i += ggml_f32_epr) {
        ax1 = GGML_F32_VEC_LOAD(x + i);
        ay1 = GGML_F32_VEC_LOAD(y + i);
        ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx);

        GGML_F32_VEC_STORE(y + i, ay1);
    }
    // at most ggml_f32_epr - 1 elements remain; apply a predicated svmad to just those
    if (np2 < n) {
        svbool_t pg = svwhilelt_b32(np2, n);
        ax1 = svld1_f32(pg, x + np2);
        ay1 = svld1_f32(pg, y + np2);
        ay1 = svmad_f32_m(pg, ax1, vx, ay1);

        svst1_f32(pg, y + np2, ay1);
    }
#elif defined(__riscv_v_intrinsic)
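    // strip-mined RVV loop: vsetvl yields the element count (avl) processed per
    // iteration, shrinking on the final pass, so no separate scalar tail is needed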
    for (int i = 0, avl; i < n; i += avl) {
        avl = __riscv_vsetvl_e32m8(n - i);
        vfloat32m8_t ax = __riscv_vle32_v_f32m8(&x[i], avl);
        vfloat32m8_t ay = __riscv_vle32_v_f32m8(&y[i], avl);
        vfloat32m8_t ny = __riscv_vfmadd_vf_f32m8(ax, v, ay, avl);
        __riscv_vse32_v_f32m8(&y[i], ny, avl);
    }
#else
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
}
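// f16 variant of ggml_vec_mad_f32: y := y + x*v, with the scalar v given as f32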
inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * GGML_RESTRICT y, const ggml_fp16_t * GGML_RESTRICT x, const float v) {
#if defined(GGML_SIMD)
#if defined(__ARM_FEATURE_SVE)
    const int sve_register_length = svcntb() * 8;
    const int ggml_f16_epr = sve_register_length / 16;
    const int ggml_f16_step = 8 * ggml_f16_epr;

    GGML_F16x_VEC vx = GGML_F16x_VEC_SET1(v);

    const int np = (n & ~(ggml_f16_step - 1));

    svfloat16_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8;
    svfloat16_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8;
    for (int i = 0; i < np; i += ggml_f16_step) {
        ax1 = GGML_F16x_VEC_LOAD(x + i + 0 * ggml_f16_epr, 0);
        ay1 = GGML_F16x_VEC_LOAD(y + i + 0 * ggml_f16_epr, 0);
        ay1 = GGML_F16x_VEC_FMA(ay1, ax1, vx);

        GGML_F16x_VEC_STORE(y + i + 0 * ggml_f16_epr, ay1, 0);

        ax2 = GGML_F16x_VEC_LOAD(x + i + 1 * ggml_f16_epr, 1);
        ay2 = GGML_F16x_VEC_LOAD(y + i + 1 * ggml_f16_epr, 1);
        ay2 = GGML_F16x_VEC_FMA(ay2, ax2, vx);

        GGML_F16x_VEC_STORE(y + i + 1 * ggml_f16_epr, ay2, 1);

        ax3 = GGML_F16x_VEC_LOAD(x + i + 2 * ggml_f16_epr, 2);
        ay3 = GGML_F16x_VEC_LOAD(y + i + 2 * ggml_f16_epr, 2);
        ay3 = GGML_F16x_VEC_FMA(ay3, ax3, vx);

        GGML_F16x_VEC_STORE(y + i + 2 * ggml_f16_epr, ay3, 2);

        ax4 = GGML_F16x_VEC_LOAD(x + i + 3 * ggml_f16_epr, 3);
        ay4 = GGML_F16x_VEC_LOAD(y + i + 3 * ggml_f16_epr, 3);
        ay4 = GGML_F16x_VEC_FMA(ay4, ax4, vx);

        GGML_F16x_VEC_STORE(y + i + 3 * ggml_f16_epr, ay4, 3);

        ax5 = GGML_F16x_VEC_LOAD(x + i + 4 * ggml_f16_epr, 4);
        ay5 = GGML_F16x_VEC_LOAD(y + i + 4 * ggml_f16_epr, 4);
        ay5 = GGML_F16x_VEC_FMA(ay5, ax5, vx);

        GGML_F16x_VEC_STORE(y + i + 4 * ggml_f16_epr, ay5, 4);

        ax6 = GGML_F16x_VEC_LOAD(x + i + 5 * ggml_f16_epr, 5);
        ay6 = GGML_F16x_VEC_LOAD(y + i + 5 * ggml_f16_epr, 5);
        ay6 = GGML_F16x_VEC_FMA(ay6, ax6, vx);

        GGML_F16x_VEC_STORE(y + i + 5 * ggml_f16_epr, ay6, 5);

        ax7 = GGML_F16x_VEC_LOAD(x + i + 6 * ggml_f16_epr, 6);
        ay7 = GGML_F16x_VEC_LOAD(y + i + 6 * ggml_f16_epr, 6);
        ay7 = GGML_F16x_VEC_FMA(ay7, ax7, vx);

        GGML_F16x_VEC_STORE(y + i + 6 * ggml_f16_epr, ay7, 6);

        ax8 = GGML_F16x_VEC_LOAD(x + i + 7 * ggml_f16_epr, 7);
        ay8 = GGML_F16x_VEC_LOAD(y + i + 7 * ggml_f16_epr, 7);
        ay8 = GGML_F16x_VEC_FMA(ay8, ax8, vx);

        GGML_F16x_VEC_STORE(y + i + 7 * ggml_f16_epr, ay8, 7);
    }
    const int np2 = (n & ~(ggml_f16_epr - 1));
    for (int k = np; k < np2; k += ggml_f16_epr) {
        svfloat16_t rx = GGML_F16x_VEC_LOAD(x + k, 0);
        svfloat16_t ry = GGML_F16x_VEC_LOAD(y + k, 0);
        ry = GGML_F16x_VEC_FMA(ry, rx, vx);

        GGML_F16x_VEC_STORE(y + k, ry, 0);
    }

    if (np2 < n) {
        svbool_t pg = svwhilelt_b16(np2, n);
        svfloat16_t hx = svld1_f16(pg, (const __fp16 *)(x + np2));
        svfloat16_t hy = svld1_f16(pg, (const __fp16 *)(y + np2));
        hy = svmad_f16_x(pg, hx, vx, hy);
        svst1_f16(pg, (__fp16 *)(y + np2), hy);
    }
#elif defined(__riscv_v_intrinsic)
    // todo: RVV impl
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v);
    }
#else
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_FMA(ay[j], ax[j], vx);

            GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v);
    }
#endif
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v);
    }
#endif
}

// xs and vs are byte strides of x and v
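// accumulates GGML_VEC_MAD_UNROLL rows in one pass: y += sum_k x[k]*v[k][0]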
inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * GGML_RESTRICT y, const float * GGML_RESTRICT xv, const float * GGML_RESTRICT vv) {
    const float * GGML_RESTRICT x[GGML_VEC_MAD_UNROLL];
    const float * GGML_RESTRICT v[GGML_VEC_MAD_UNROLL];

    for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
        x[i] = (const float *) ((const char *) xv + i*xs);
        v[i] = (const float *) ((const char *) vv + i*vs);
    }

#if defined(GGML_SIMD)
#if defined(__ARM_FEATURE_SVE)
    // route to the scalar implementation  // TODO: write SVE code
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = 0; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#elif defined(__riscv_v_intrinsic)
    for (int i = 0, avl; i < n; i += avl) {
        avl = __riscv_vsetvl_e32m8(n - i);
        vfloat32m8_t ay = __riscv_vle32_v_f32m8(&y[i], avl);
        for (int k = 0; k < GGML_VEC_MAD_UNROLL; k++) {
            vfloat32m8_t ax = __riscv_vle32_v_f32m8(&x[k][i], avl);
            ay = __riscv_vfmadd_vf_f32m8(ax, v[k][0], ay, avl);
        }
        __riscv_vse32_v_f32m8(&y[i], ay, avl);
    }
#else
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];

    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        vx[k] = GGML_F32_VEC_SET1(v[k][0]);
    }

    GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
                ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
                ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
            }

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = np; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#endif
#else
    // scalar
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = 0; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#endif
}
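// y := x*s + b (fused scale and shift)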
inline static void ggml_vec_mad1_f32(const int n, float * y, const float * x, const float s, const float b) {
#if defined(GGML_USE_ACCELERATE)
    vDSP_vsmsa(x, 1, &s, &b, y, 1, n);
#elif defined(GGML_SIMD)
#if defined(__ARM_FEATURE_SVE)
    // scalar; TODO: write SVE code
    for (int i = 0; i < n; ++i) {
        y[i] = x[i]*s + b;
    }
#elif defined(__riscv_v_intrinsic)
    for (int i = 0, avl; i < n; i += avl) {
        avl = __riscv_vsetvl_e32m8(n - i);
        vfloat32m8_t ax = __riscv_vle32_v_f32m8(&x[i], avl);
        vfloat32m8_t vb = __riscv_vfmv_v_f_f32m8(b, avl);
        vfloat32m8_t ny = __riscv_vfmadd_vf_f32m8(ax, s, vb, avl);
        __riscv_vse32_v_f32m8(&y[i], ny, avl);
    }
#else
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vs = GGML_F32_VEC_SET1(s);
    GGML_F32_VEC vb = GGML_F32_VEC_SET1(b);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], vs, vb);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] = x[i]*s + b;
    }
#endif
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] = x[i]*s + b;
    }
#endif
}
//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
#if defined(GGML_USE_ACCELERATE)
    vDSP_vsmul(y, 1, &v, y, 1, n);
#elif defined(GGML_SIMD)
#if defined(__ARM_FEATURE_SVE)
    const int sve_register_length = ggml_cpu_get_sve_cnt() * 8;
    const int ggml_f32_epr = sve_register_length / 32; // f32 elements per register: SVE128:4, SVE256:8, SVE512:16
    const int ggml_f32_step = 2 * ggml_f32_epr;

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
    const int np = (n & ~(ggml_f32_step - 1));
    svfloat32_t ay1;
    svfloat32_t ay2;
    for (int i = 0; i < np; i += ggml_f32_step) {
        ay1 = GGML_F32_VEC_LOAD(y + i);
        ay1 = GGML_F32_VEC_MUL(ay1, vx);
        GGML_F32_VEC_STORE(y + i, ay1);

        ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr);
        ay2 = GGML_F32_VEC_MUL(ay2, vx);
        GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2);
    }
    // leftovers
    // fewer than ggml_f32_step elements remain; apply a predicated svmul to just those
    if (np < n) {
        svbool_t pg = svwhilelt_b32(np, n);
        ay1 = svld1_f32(pg, y + np);
        ay1 = svmul_f32_m(pg, ay1, vx);
        svst1_f32(pg, y + np, ay1);
    }
#elif defined(__riscv_v_intrinsic)
    for (int i = 0, avl; i < n; i += avl) {
        avl = __riscv_vsetvl_e32m8(n - i);
        vfloat32m8_t ay = __riscv_vle32_v_f32m8(&y[i], avl);
        vfloat32m8_t ny = __riscv_vfmul_vf_f32m8(ay, v, avl);
        __riscv_vse32_v_f32m8(&y[i], ny, avl);
    }
#else
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] *= v;
    }
#endif
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] *= v;
    }
#endif
}
inline static void ggml_vec_scale_f16(const int n, ggml_fp16_t * y, const float v) {
#if defined(GGML_SIMD)
#if defined(__ARM_FEATURE_SVE)
    const int sve_register_length = svcntb() * 8;
    const int ggml_f16_epr = sve_register_length / 16;
    const int ggml_f16_step = 2 * ggml_f16_epr;

    GGML_F16x_VEC vx = GGML_F16x_VEC_SET1(v);
    const int np = (n & ~(ggml_f16_step - 1));
    svfloat16_t ay1, ay2;

    for (int i = 0; i < np; i += ggml_f16_step) {
        ay1 = GGML_F16x_VEC_LOAD(y + i + 0*ggml_f16_epr, 0);
        ay1 = GGML_F16x_VEC_MUL(ay1, vx);
        GGML_F16x_VEC_STORE(y + i + 0*ggml_f16_epr, ay1, 0);

        ay2 = GGML_F16x_VEC_LOAD(y + i + 1*ggml_f16_epr, 1);
        ay2 = GGML_F16x_VEC_MUL(ay2, vx);
        GGML_F16x_VEC_STORE(y + i + 1*ggml_f16_epr, ay2, 1);
    }
    // leftovers
    // fewer than ggml_f16_step elements remain; apply a predicated svmul to just those
    if (np < n) {
        svbool_t pg = svwhilelt_b16(np, n);
        svfloat16_t hy = svld1_f16(pg, (__fp16 *)(y + np));
        svfloat16_t out = svmul_f16_m(pg, hy, vx);
        svst1_f16(pg, (__fp16 *)(y + np), out);
    }
#elif defined(__riscv_v_intrinsic)
    // todo: RVV impl
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v);
    }
#else
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);

    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_MUL(ay[j], vx);

            GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v);
    }
#endif
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v);
    }
#endif
}
inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, 0, x, 0, x, 0, 1); *s = sqrtf(*s); }
inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
inline static void ggml_vec_sqr_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(x[i]);
        y[i] = GGML_CPU_FP32_TO_FP16(v*v);
    }
}
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
inline static void ggml_vec_sqrt_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(sqrtf(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}
inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
inline static void ggml_vec_log_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(logf(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}
inline static void ggml_vec_sin_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sinf(x[i]); }
inline static void ggml_vec_sin_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(sinf(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}
inline static void ggml_vec_cos_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = cosf(x[i]); }
inline static void ggml_vec_cos_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(cosf(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}
inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
inline static void ggml_vec_abs_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(fabsf(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}
inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_sgn_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(x[i]);
        y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? 1.f : ((v < 0.f) ? -1.f : 0.f));
    }
}
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
inline static void ggml_vec_step_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16((GGML_CPU_FP16_TO_FP32(x[i]) > 0.f) ? 1.f : 0.f);
    }
}
inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
inline static void ggml_vec_tanh_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(tanhf(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}
inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expm1f(x[i]); }
inline static void ggml_vec_elu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(expm1f(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
inline static void ggml_vec_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(x[i]);
        y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v : 0.f);
    }
}
inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
inline static void ggml_vec_leaky_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const float ns) {
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(x[i]);
        y[i] = GGML_CPU_FP32_TO_FP16(((v > 0.f) ? v : 0.f) + ns * ((v < 0.0f) ? v : 0.f));
    }
}
inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); }
inline static void ggml_vec_sigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(1.f / (1.f + expf(-GGML_CPU_FP16_TO_FP32(x[i]))));
    }
}
// TODO: optimize performance
inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
inline static void ggml_vec_hardswish_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(x[i]);
        y[i] = GGML_CPU_FP32_TO_FP16(v * fminf(1.0f, fmaxf(0.0f, (v + 3.0f) / 6.0f)));
    }
}
inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
inline static void ggml_vec_hardsigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(fminf(1.0f, fmaxf(0.0f, (GGML_CPU_FP16_TO_FP32(x[i]) + 3.0f) / 6.0f)));
    }
}
inline static void ggml_vec_exp_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = expf(x[i]); }
inline static void ggml_vec_exp_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_CPU_FP32_TO_FP16(expf(GGML_CPU_FP16_TO_FP32(x[i])));
    }
}

static const float GELU_COEF_A     = 0.044715f;
static const float GELU_QUICK_COEF = -1.702f;
static const float SQRT_2_OVER_PI  = 0.79788456080286535587989211986876f;
static const float SQRT_2_INV      = 0.70710678118654752440084436210484f;
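// tanh approximation of GELU:
// gelu(x) ~= 0.5*x*(1 + tanhf(SQRT_2_OVER_PI*(x + GELU_COEF_A*x^3)))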
inline static float ggml_gelu_f32(float x) {
    return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}

inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_table_gelu_f16[i16[i]];
    }
}
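// exact GELU via the error function: gelu(x) = 0.5*x*(1 + erf(x/sqrt(2)))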
inline static void ggml_vec_gelu_erf_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        float xi = GGML_CPU_FP16_TO_FP32(x[i]);
        float res = 0.5f*xi*(1.0f + erff(xi*SQRT_2_INV));
        y[i] = GGML_CPU_FP32_TO_FP16(res);
    }
}

#ifdef GGML_GELU_FP16
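// fast path: round x to f16 and index the precomputed 64K-entry table;
// outside [-10, 10] the result is clamped to 0 or x, where gelu saturates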
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        if (x[i] <= -10.0f) {
            y[i] = 0.0f;
        } else if (x[i] >= 10.0f) {
            y[i] = x[i];
        } else {
            ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]);
            memcpy(&t, &fp16, sizeof(uint16_t));
            y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]);
        }
    }
}
#else
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]);
    }
}
#endif

inline static void ggml_vec_gelu_erf_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        float xi = x[i];
        y[i] = 0.5f*xi*(1.0f + erff(xi*SQRT_2_INV));
    }
}

inline static float ggml_gelu_quick_f32(float x) {
    return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
}

//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = ggml_table_gelu_quick_f16[i16[i]];
//    }
//}

#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_quick_f32(x[i]);
    }
}
#endif

inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(x[i]);
        y[i] = GGML_CPU_FP32_TO_FP16(v*(1.0f/(1.0f+expf(GELU_QUICK_COEF*v))));
    }
}

// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
    return x/(1.0f + expf(-x));
}

inline static ggml_fp16_t ggml_silu_f16(ggml_fp16_t x) {
    float v = GGML_CPU_FP16_TO_FP32(x);
    return GGML_CPU_FP32_TO_FP16(v/(1.0f + expf(-v)));
}

#if __FINITE_MATH_ONLY__
#error "some routines in ggml.c require non-finite math arithmetics -- pass -fno-finite-math-only to the compiler to fix"
#error "ref: https://github.com/ggml-org/llama.cpp/pull/7154#issuecomment-2143844461"
#endif

/* Below function was borrowed from the GitHub repository:
https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp */
#if defined(__ARM_FEATURE_SVE) && defined(__aarch64__)
inline static svfloat32_t exp_ps_sve(svbool_t pg, svfloat32_t src) {
    // Constants
    const svfloat32_t log2_e = svdup_n_f32(1.4426950409f);
    const svfloat32_t ln2 = svdup_n_f32(0.6931473921f);
    const svfloat32_t half_ln2_sq = svdup_n_f32(0.2413862043f);
    const svuint32_t not_mask17 = svdup_n_u32(~((1u << 17) - 1));
    const svfloat32_t one = svdup_n_f32(1.0f);
    const svfloat32_t inactive1 = svdup_n_f32(0.0f);
    const svint32_t inactive2 = svdup_n_s32(0);

    // Algorithm starts here
    svfloat32_t t0 = svmul_f32_m(pg, src, log2_e); // y = x * log2(e)
    svfloat32_t t1 = svrintm_f32_m(inactive1, pg, t0); // round to int (float)
    svint32_t t2 = svcvt_s32_f32_m(inactive2, pg, t1); // n
    t1 = svsub_f32_m(pg, t0, t1); // a = y - floor(y)
    t1 = svadd_f32_m(pg, t1, one); // b = a + 1

    svuint32_t t3 = svlsr_n_u32_m(pg, svreinterpret_u32_f32(t1), 17); // v = b >> 17 (u32)
    svfloat32_t t4 = svexpa_f32(t3); // c = fexpa(v)
    t4 = svscale_f32_m(pg, t4, t2); // fexpa(v) * 2^(n)

    // and_(t2.d, t1.d, not_mask17.d)
    svfloat32_t t5 = svreinterpret_f32_u32(svand_u32_m(pg, svreinterpret_u32_f32(t1), not_mask17));
    t5 = svsub_f32_m(pg, t1, t5); // z
    t0 = svmla_f32_m(pg, ln2, t5, half_ln2_sq); // ln2 + half_ln2_sq * z
    t0 = svmla_f32_m(pg, one, t5, t0); // 1 + (ln2 * z) + (half_ln2_sq * z * z)
    t0 = svmul_f32_m(pg, t0, t4); // Final result
    return t0;
}
#endif

#if defined(__ARM_FEATURE_SVE) && defined(__aarch64__)
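// computes e^x as 2^n * e^b: n = round(x*log2(e)) is extracted with the
// 0x1.8p23f round-to-nearest shifter, b = x - n*ln2 is the reduced argument,
// a polynomial j approximates e^b - 1, and 2^n is assembled directly in the
// f32 exponent bits; lanes with |n| > 126 take a two-step scaling path instead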
inline static svfloat32_t ggml_v_expf(svbool_t pg, svfloat32_t x) {
    const svfloat32_t r = svdup_n_f32_x(pg, 0x1.8p23f);
    const svfloat32_t z = svmla_n_f32_x(pg, r, x, 0x1.715476p+0f);
    const svfloat32_t n = svsub_f32_x(pg, z, r);
    const svfloat32_t b = svmls_n_f32_x(pg, svmls_n_f32_x(pg, x, n, 0x1.62e4p-1f), n, 0x1.7f7d1cp-20f);
    const svuint32_t e = svlsl_n_u32_x(pg, svreinterpret_u32_f32(z), 23);
    const svfloat32_t k = svreinterpret_f32_u32(svadd_u32_x(pg, e, svreinterpret_u32_f32(svdup_n_f32_x(pg, 1))));
    const svbool_t c = svacgt_n_f32(pg, n, 126);
    const svfloat32_t u = svmul_f32_x(pg, b, b);
    const svfloat32_t j = svmla_f32_x(pg,
            svmul_n_f32_x(pg, b, 0x1.ffffecp-1f),
            svmla_f32_x(pg, svmla_f32_x(pg, svdup_n_f32_x(pg, 0x1.fffdb6p-2f), svdup_n_f32_x(pg, 0x1.555e66p-3f), b),
                        svmla_f32_x(pg, svdup_n_f32_x(pg, 0x1.573e2ep-5f), svdup_n_f32_x(pg, 0x1.0e4020p-7f), b), u), u);
    const svuint32_t d = svdup_n_u32_z(svcmple_n_f32(pg, n, 0.0), 0x82000000);
    const svfloat32_t s1 = svreinterpret_f32_u32(svadd_n_u32_x(pg, d, 0x7f000000));
    const svfloat32_t s2 = svreinterpret_f32_u32(svsub_u32_x(pg, e, d));
    return svsel_f32(svacgt_f32(pg, n, svdup_n_f32_x(pg, 192)), svmul_f32_x(pg, s1, s1),
                     svsel_f32(c, svmul_f32_x(pg, svmla_f32_x(pg, s2, s2, j), s1), svmla_f32_x(pg, k, k, j)));
}

// computes silu x/(1+exp(-x)) in single precision vector
inline static svfloat32_t ggml_v_silu(svbool_t pg, svfloat32_t x) {
    const svfloat32_t one = svdup_n_f32_x(pg, 1.0f);
    const svfloat32_t zero = svdup_n_f32_x(pg, 0.0f);
    const svfloat32_t neg_x = svsub_f32_x(pg, zero, x);
    const svfloat32_t exp_neg_x = ggml_v_expf(pg, neg_x);
    const svfloat32_t one_plus_exp_neg_x = svadd_f32_x(pg, one, exp_neg_x);
    return svdiv_f32_x(pg, x, one_plus_exp_neg_x);
}

#elif defined(__ARM_NEON) && defined(__aarch64__)

// adapted from arm limited optimized routine
// the maximum error is 1.45358 plus 0.5 ulps
// numbers above 88.38 will flush to infinity
// numbers beneath -103.97 will flush to zero
inline static float32x4_t ggml_v_expf(float32x4_t x) {
    const float32x4_t r = vdupq_n_f32(0x1.8p23f);
    const float32x4_t z = vfmaq_f32(r, x, vdupq_n_f32(0x1.715476p+0f));
    const float32x4_t n = vsubq_f32(z, r);
    const float32x4_t b = vfmsq_f32(vfmsq_f32(x, n, vdupq_n_f32(0x1.62e4p-1f)), n,
                                    vdupq_n_f32(0x1.7f7d1cp-20f));
    const uint32x4_t e = vshlq_n_u32(vreinterpretq_u32_f32(z), 23);
    const float32x4_t k = vreinterpretq_f32_u32(vaddq_u32(e, vreinterpretq_u32_f32(vdupq_n_f32(1))));
    const uint32x4_t c = vcagtq_f32(n, vdupq_n_f32(126));
    const float32x4_t u = vmulq_f32(b, b);
    const float32x4_t j = vfmaq_f32(
        vmulq_f32(vdupq_n_f32(0x1.ffffecp-1f), b),
        vfmaq_f32(vfmaq_f32(vdupq_n_f32(0x1.fffdb6p-2f), vdupq_n_f32(0x1.555e66p-3f), b),
                  vfmaq_f32(vdupq_n_f32(0x1.573e2ep-5f), vdupq_n_f32(0x1.0e4020p-7f), b), u), u);
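    // fast path: when no lane has |n| > 126 the result is simply k*(1 + j) = 2^n * e^b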
  886. if (!vpaddd_u64(vreinterpretq_u64_u32(c)))
  887. return vfmaq_f32(k, j, k);
  888. const uint32x4_t d = vandq_u32(vclezq_f32(n), vdupq_n_u32(0x82000000));
  889. const float32x4_t s1 = vreinterpretq_f32_u32(vaddq_u32(d, vdupq_n_u32(0x7f000000)));
  890. const float32x4_t s2 = vreinterpretq_f32_u32(vsubq_u32(e, d));
  891. return vbslq_f32(vcagtq_f32(n, vdupq_n_f32(192)), vmulq_f32(s1, s1),
  892. vbslq_f32(c, vmulq_f32(vfmaq_f32(s2, s2, j), s1), vfmaq_f32(k, k, j)));
  893. }
  894. // computes silu x/(1+exp(-x)) in single precision vector
  895. inline static float32x4_t ggml_v_silu(float32x4_t x) {
  896. const float32x4_t one = vdupq_n_f32(1.0f);
  897. const float32x4_t zero = vdupq_n_f32(0.0f);
  898. const float32x4_t neg_x = vsubq_f32(zero, x);
  899. const float32x4_t exp_neg_x = ggml_v_expf(neg_x);
  900. const float32x4_t one_plus_exp_neg_x = vaddq_f32(one, exp_neg_x);
  901. return vdivq_f32(x, one_plus_exp_neg_x);
  902. }
  903. #elif defined(__AVX512F__) && defined(__AVX512DQ__)
  904. // adapted from arm limited optimized routine
  905. // the maximum error is 1.45358 plus 0.5 ulps
  906. // numbers above 88.38 will flush to infinity
  907. // numbers beneath -103.97 will flush to zero
  908. inline static __m512 ggml_v_expf(__m512 x) {
  909. const __m512 r = _mm512_set1_ps(0x1.8p23f);
  910. const __m512 z = _mm512_fmadd_ps(x, _mm512_set1_ps(0x1.715476p+0f), r);
  911. const __m512 n = _mm512_sub_ps(z, r);
  912. const __m512 b =
  913. _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.7f7d1cp-20f),
  914. _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.62e4p-1f), x));
  915. const __mmask16 d =
  916. _mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(192), _CMP_GT_OQ);
  917. const __m512 u = _mm512_mul_ps(b, b);
  918. const __m512 j = _mm512_fmadd_ps(
  919. _mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_set1_ps(0x1.0e4020p-7f), b,
  920. _mm512_set1_ps(0x1.573e2ep-5f)),
  921. u,
  922. _mm512_fmadd_ps(_mm512_set1_ps(0x1.555e66p-3f), b,
  923. _mm512_set1_ps(0x1.fffdb6p-2f))),
  924. u,
  925. _mm512_fmadd_ps(_mm512_set1_ps(0x1.ffffecp-1f), b, _mm512_set1_ps(1.0F)));
  926. const __m512 res = _mm512_scalef_ps(j, n);
  927. if (_mm512_kortestz(d, d))
  928. return res;
  929. const __m512 zero = _mm512_setzero_ps();
  930. const __m512 alt = _mm512_mask_blend_ps(
  931. _mm512_cmp_ps_mask(n, zero, _CMP_LE_OQ), _mm512_set1_ps(INFINITY), zero);
  932. return _mm512_mask_blend_ps(d, res, alt);
  933. }
  934. // computes silu x/(1+exp(-x)) in single precision vector
  935. inline static __m512 ggml_v_silu(__m512 x) {
  936. const __m512 one = _mm512_set1_ps(1);
  937. const __m512 zero = _mm512_setzero_ps();
  938. const __m512 neg_x = _mm512_sub_ps(zero, x);
  939. const __m512 exp_neg_x = ggml_v_expf(neg_x);
  940. const __m512 one_plus_exp_neg_x = _mm512_add_ps(one, exp_neg_x);
  941. return _mm512_div_ps(x, one_plus_exp_neg_x);
  942. }
  943. #elif defined(__AVX2__) && defined(__FMA__)
  944. // adapted from arm limited optimized routine
  945. // the maximum error is 1.45358 plus 0.5 ulps
  946. // numbers above 88.38 will flush to infinity
  947. // numbers beneath -103.97 will flush to zero
  948. inline static __m256 ggml_v_expf(__m256 x) {
  949. const __m256 r = _mm256_set1_ps(0x1.8p23f);
  950. const __m256 z = _mm256_fmadd_ps(x, _mm256_set1_ps(0x1.715476p+0f), r);
  951. const __m256 n = _mm256_sub_ps(z, r);
  952. const __m256 b = _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.7f7d1cp-20f),
  953. _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.62e4p-1f), x));
  954. const __m256i e = _mm256_slli_epi32(_mm256_castps_si256(z), 23);
  955. const __m256 k = _mm256_castsi256_ps(
  956. _mm256_add_epi32(e, _mm256_castps_si256(_mm256_set1_ps(1))));
  957. const __m256i c = _mm256_castps_si256(
  958. _mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n),
  959. _mm256_set1_ps(126), _CMP_GT_OQ));
  960. const __m256 u = _mm256_mul_ps(b, b);
  961. const __m256 j = _mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_set1_ps(0x1.0e4020p-7f), b,
  962. _mm256_set1_ps(0x1.573e2ep-5f)), u,
  963. _mm256_fmadd_ps(_mm256_set1_ps(0x1.555e66p-3f), b,
  964. _mm256_set1_ps(0x1.fffdb6p-2f))),
  965. u, _mm256_mul_ps(_mm256_set1_ps(0x1.ffffecp-1f), b));
  966. if (!_mm256_movemask_ps(_mm256_castsi256_ps(c)))
  967. return _mm256_fmadd_ps(j, k, k);
  968. const __m256i g = _mm256_and_si256(
  969. _mm256_castps_si256(_mm256_cmp_ps(n, _mm256_setzero_ps(), _CMP_LE_OQ)),
  970. _mm256_set1_epi32(0x82000000u));
  971. const __m256 s1 =
  972. _mm256_castsi256_ps(_mm256_add_epi32(g, _mm256_set1_epi32(0x7f000000u)));
  973. const __m256 s2 = _mm256_castsi256_ps(_mm256_sub_epi32(e, g));
  974. const __m256i d = _mm256_castps_si256(
  975. _mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n),
  976. _mm256_set1_ps(192), _CMP_GT_OQ));
  977. return _mm256_or_ps(
  978. _mm256_and_ps(_mm256_castsi256_ps(d), _mm256_mul_ps(s1, s1)),
  979. _mm256_andnot_ps(
  980. _mm256_castsi256_ps(d),
  981. _mm256_or_ps(
  982. _mm256_and_ps(_mm256_castsi256_ps(c),
  983. _mm256_mul_ps(_mm256_fmadd_ps(s2, j, s2), s1)),
  984. _mm256_andnot_ps(_mm256_castsi256_ps(c), _mm256_fmadd_ps(k, j, k)))));
  985. }
  986. // computes silu x/(1+exp(-x)) in single precision vector
  987. inline static __m256 ggml_v_silu(__m256 x) {
  988. const __m256 one = _mm256_set1_ps(1);
  989. const __m256 zero = _mm256_setzero_ps();
  990. const __m256 neg_x = _mm256_sub_ps(zero, x);
  991. const __m256 exp_neg_x = ggml_v_expf(neg_x);
  992. const __m256 one_plus_exp_neg_x = _mm256_add_ps(one, exp_neg_x);
  993. return _mm256_div_ps(x, one_plus_exp_neg_x);
  994. }
  995. #elif defined(__SSE2__) // __AVX2__ / __ARM_NEON
  996. #if defined(__FMA__)
  997. #define MADD128(x, y, z) _mm_fmadd_ps(x, y, z)
  998. #define NMADD128(x, y, z) _mm_fnmadd_ps(x, y, z)
  999. #else
  1000. #define MADD128(x, y, z) _mm_add_ps(_mm_mul_ps(x, y), z)
  1001. #define NMADD128(x, y, z) _mm_sub_ps(z, _mm_mul_ps(x, y))
  1002. #endif
  1003. // adapted from arm limited optimized routine
  1004. // the maximum error is 1.45358 plus 0.5 ulps
  1005. // numbers above 88.38 will flush to infinity
  1006. // numbers beneath -103.97 will flush to zero
  1007. inline static __m128 ggml_v_expf(__m128 x) {
  1008. const __m128 r = _mm_set1_ps(0x1.8p23f);
  1009. const __m128 z = MADD128(x, _mm_set1_ps(0x1.715476p+0f), r);
  1010. const __m128 n = _mm_sub_ps(z, r);
  1011. const __m128 b =
  1012. NMADD128(n, _mm_set1_ps(0x1.7f7d1cp-20f), NMADD128(n, _mm_set1_ps(0x1.62e4p-1f), x));
  1013. const __m128i e = _mm_slli_epi32(_mm_castps_si128(z), 23);
  1014. const __m128 k = _mm_castsi128_ps(_mm_add_epi32(e, _mm_castps_si128(_mm_set1_ps(1))));
  1015. const __m128i c =
  1016. _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(126)));
  1017. const __m128 u = _mm_mul_ps(b, b);
  1018. const __m128 j =
  1019. MADD128(MADD128(MADD128(_mm_set1_ps(0x1.0e4020p-7f), b, _mm_set1_ps(0x1.573e2ep-5f)), u,
  1020. MADD128(_mm_set1_ps(0x1.555e66p-3f), b, _mm_set1_ps(0x1.fffdb6p-2f))),
  1021. u, _mm_mul_ps(_mm_set1_ps(0x1.ffffecp-1f), b));
  1022. if (!_mm_movemask_epi8(c))
  1023. return MADD128(j, k, k);
  1024. const __m128i g = _mm_and_si128(_mm_castps_si128(_mm_cmple_ps(n, _mm_setzero_ps())),
  1025. _mm_set1_epi32(0x82000000u));
  1026. const __m128 s1 = _mm_castsi128_ps(_mm_add_epi32(g, _mm_set1_epi32(0x7f000000u)));
  1027. const __m128 s2 = _mm_castsi128_ps(_mm_sub_epi32(e, g));
  1028. const __m128i d =
  1029. _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(192)));
  1030. return _mm_or_ps(
  1031. _mm_and_ps(_mm_castsi128_ps(d), _mm_mul_ps(s1, s1)),
  1032. _mm_andnot_ps(_mm_castsi128_ps(d),
  1033. _mm_or_ps(_mm_and_ps(_mm_castsi128_ps(c), _mm_mul_ps(MADD128(s2, j, s2), s1)),
  1034. _mm_andnot_ps(_mm_castsi128_ps(c), MADD128(k, j, k)))));
  1035. }
  1036. // computes silu x/(1+exp(-x)) in single precision vector
  1037. inline static __m128 ggml_v_silu(__m128 x) {
  1038. const __m128 one = _mm_set1_ps(1);
  1039. const __m128 zero = _mm_setzero_ps();
  1040. const __m128 neg_x = _mm_sub_ps(zero, x);
  1041. const __m128 exp_neg_x = ggml_v_expf(neg_x);
  1042. const __m128 one_plus_exp_neg_x = _mm_add_ps(one, exp_neg_x);
  1043. return _mm_div_ps(x, one_plus_exp_neg_x);
  1044. }
#elif defined(__riscv_v_intrinsic)
// adapted from arm limited optimized routine
// the maximum error is 1.45358 plus 0.5 ulps
// numbers above 88.38 will flush to infinity
// numbers beneath -103.97 will flush to zero
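// unlike the x86 paths above, which emulate selects with and/andnot/or bit masks,
// this path handles the special cases with real RVV mask registers (vbool16_t) and vmerge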
inline static vfloat32m2_t ggml_v_expf_m2(vfloat32m2_t x, int vl) {
    const vfloat32m2_t r = __riscv_vfmv_v_f_f32m2(0x1.8p23f, vl);
#ifdef __riscv_xtheadvector
    // workaround for compiler bug (gcc 14.3.0: Error: unrecognized opcode `th.vmv1r.v v2,v4')
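    // (the vfadd below presumably stands in as a register copy so the compiler
    // never emits the unsupported vector-move instruction)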
    vfloat32m2_t z = __riscv_vfadd_vf_f32m2(r, 0.0f, vl);
    z = __riscv_vfmacc_vf_f32m2(z, 0x1.715476p+0f, x, vl);
#else
    const vfloat32m2_t z = __riscv_vfmacc_vf_f32m2(r, 0x1.715476p+0f, x, vl);
#endif
    const vfloat32m2_t n = __riscv_vfsub_vv_f32m2(z, r, vl);
    const vfloat32m2_t b = __riscv_vfnmsac_vf_f32m2(__riscv_vfnmsac_vf_f32m2(x, 0x1.62e4p-1f, n, vl),
                                                    0x1.7f7d1cp-20f, n, vl);
    const vuint32m2_t e = __riscv_vsll_vx_u32m2(__riscv_vreinterpret_v_f32m2_u32m2(z), 23, vl);
    const vfloat32m2_t k = __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vadd_vx_u32m2(e, 0x3f800000, vl)); // 1.0f
    const vbool16_t c = __riscv_vmfgt_vf_f32m2_b16(__riscv_vfabs_v_f32m2(n, vl), 126.0f, vl);
    const vfloat32m2_t u = __riscv_vfmul_vv_f32m2(b, b, vl);
    const vfloat32m2_t j = __riscv_vfmacc_vv_f32m2(
        __riscv_vfmul_vf_f32m2(b, 0x1.ffffecp-1f, vl),
        __riscv_vfmacc_vv_f32m2(
            __riscv_vfmacc_vf_f32m2(__riscv_vfmv_v_f_f32m2(0x1.fffdb6p-2f, vl), 0x1.555e66p-3f, b, vl),
            __riscv_vfmacc_vf_f32m2(__riscv_vfmv_v_f_f32m2(0x1.573e2ep-5f, vl), 0x1.0e4020p-7f, b, vl),
            u, vl), u, vl);
    if (!__riscv_vcpop_m_b16(c, vl))
        return __riscv_vfmacc_vv_f32m2(k, j, k, vl);
    const vbool16_t dm = __riscv_vmfle_vf_f32m2_b16(n, 0.0f, vl);
    const vuint32m2_t d = __riscv_vmerge_vxm_u32m2(__riscv_vmv_v_x_u32m2(0, vl), 0x82000000, dm, vl);
    const vfloat32m2_t s1 = __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vadd_vx_u32m2(d, 0x7f000000, vl));
    const vfloat32m2_t s2 = __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vsub_vv_u32m2(e, d, vl));
    const vfloat32m2_t r1 = __riscv_vmerge_vvm_f32m2(
        __riscv_vfmacc_vv_f32m2(k, k, j, vl),
        __riscv_vfmul_vv_f32m2(__riscv_vfmacc_vv_f32m2(s2, s2, j, vl), s1, vl),
        c, vl);
    return __riscv_vmerge_vvm_f32m2(
        r1, __riscv_vfmul_vv_f32m2(s1, s1, vl),
        __riscv_vmfgt_vf_f32m2_b16(__riscv_vfabs_v_f32m2(n, vl), 192.0f, vl),
        vl);
}
#endif // __ARM_NEON / __AVX2__ / __SSE2__ / __riscv_v_intrinsic
inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_silu_f16(x[i]);
    }
}
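// backward pass of silu: with s = sigmoid(x) = 1/(1+exp(-x)),
//   silu(x) = x*s  =>  silu'(x) = s + x*s*(1-s) = s*(1 + x*(1-s))
// so dx = dy * s * (1 + x*(1-s)), which is exactly what the helpers below compute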
inline static float ggml_silu_backward_f32(float x, float dy) {
    const float s = 1.0f/(1.0f + expf(-x));
    return dy*s*(1.0f + x*(1.0f - s));
}

inline static ggml_fp16_t ggml_silu_backward_f16(ggml_fp16_t x, ggml_fp16_t dy) {
    const float v = GGML_CPU_FP16_TO_FP32(x);
    const float s = 1.0f/(1.0f + expf(-v));
    return GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(dy)*s*(1.0f + v*(1.0f - s)));
}

inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
    }
}

inline static void ggml_vec_silu_backward_f16(const int n, ggml_fp16_t * dx, const ggml_fp16_t * x, const ggml_fp16_t * dy) {
    for (int i = 0; i < n; ++i) {
        dx[i] = ggml_silu_backward_f16(x[i], dy[i]);
    }
}
inline static void ggml_vec_reglu_f32(const int n, float * y, const float * x, const float * g) {
    for (int i = 0; i < n; ++i) {
        y[i] = (x[i] > 0.f) ? x[i] * g[i] : 0.f;
    }
}

inline static void ggml_vec_reglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(x[i]);
        y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v * GGML_CPU_FP16_TO_FP32(g[i]) : 0.f);
    }
}
#ifdef GGML_GELU_FP16
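// gelu via a precomputed fp16 lookup table: ggml_table_gelu_f16 holds gelu(v) for
// every 16-bit fp16 pattern, so the hot path is one fp32->fp16 conversion plus a load;
// the |x| >= 10 branches handle the tails, where gelu(x) is indistinguishable from
// 0 and x respectively for practical purposes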
inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        if (x[i] <= -10.0f) {
            y[i] = 0.0f;
        } else if (x[i] >= 10.0f) {
            y[i] = x[i] * g[i];
        } else {
            ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]);
            memcpy(&t, &fp16, sizeof(uint16_t));
            y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]) * g[i];
        }
    }
}
#else
inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]) * g[i];
    }
}
#endif
inline static void ggml_vec_geglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(g[i]);
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[i16[i]]) * v);
    }
}

void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g);
inline static void ggml_vec_swiglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
    for (int i = 0; i < n; ++i) {
        float xi = GGML_CPU_FP16_TO_FP32(x[i]);
        float gi = GGML_CPU_FP16_TO_FP32(g[i]);
        y[i] = GGML_CPU_FP32_TO_FP16((xi/(1.0f + expf(-xi))) * gi);
    }
}
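// "exact" gelu via the error function: gelu(x) = 0.5*x*(1 + erf(x/sqrt(2))),
// with SQRT_2_INV = 1/sqrt(2) ~= 0.70710678f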
inline static void ggml_vec_geglu_erf_f32(const int n, float * y, const float * x, const float * g) {
    for (int i = 0; i < n; ++i) {
        float xi = x[i];
        y[i] = 0.5f * xi * (1.0f + erff(xi*SQRT_2_INV)) * g[i];
    }
}

inline static void ggml_vec_geglu_erf_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
    for (int i = 0; i < n; ++i) {
        float xi = GGML_CPU_FP16_TO_FP32(x[i]);
        float gi = GGML_CPU_FP16_TO_FP32(g[i]);
        y[i] = GGML_CPU_FP32_TO_FP16(0.5f * xi * (1.0f + erff(xi*SQRT_2_INV)) * gi);
    }
}
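// gelu_quick approximates gelu with a scaled sigmoid, gelu_quick(x) = x*sigmoid(1.702*x);
// as above, GGML_GELU_QUICK_FP16 swaps the expf call for a 64K-entry fp16 table lookup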
#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_geglu_quick_f32(const int n, float * y, const float * x, const float * g) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]) * g[i];
    }
}
#else
inline static void ggml_vec_geglu_quick_f32(const int n, float * y, const float * x, const float * g) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_quick_f32(x[i]) * g[i];
    }
}
#endif

inline static void ggml_vec_geglu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        float v = GGML_CPU_FP16_TO_FP32(g[i]);
        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[i16[i]]) * v);
    }
}
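// sums are accumulated in ggml_float (double by default) to limit rounding error
// on long vectors; with GGML_USE_ACCELERATE the work is routed to vDSP instead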
inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = (float)sum;
#else
    vDSP_sve(x, 1, s, n);
#endif
}

inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
}

inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += GGML_CPU_FP16_TO_FP32(x[i]);
    }
    *s = sum;
}

inline static void ggml_vec_sum_bf16_ggf(const int n, float * s, const ggml_bf16_t * x) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += GGML_BF16_TO_FP32(x[i]);
    }
    *s = sum;
}
inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    *s = max;
#else
    vDSP_maxv(x, 1, s, n);
#endif
}

inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
    ggml_vec_norm_f32(n, s, x);
    *s = 1.f/(*s);
}
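// returns the index of the maximum element; on ties the highest index wins,
// because idx is refreshed whenever x[i] equals the running max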
inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
    float max = -INFINITY;
    int idx = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
        if (max == x[i]) { idx = i; }
    }
    *s = idx;
}
#ifdef __cplusplus
}
#endif