vec.cpp

#include "vec.h"

#include <cassert>

// precomputed gelu table for f16 (128 KB)
ggml_fp16_t ggml_table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];
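
// Note: each table holds one entry per f16 bit pattern (1 << 16 entries x 2 bytes = 128 KB).
// A minimal initialization sketch, assuming ggml_fp16_t is the raw 16-bit container and that the
// FP16 conversion macros and the scalar ggml_gelu_f32 / ggml_gelu_quick_f32 helpers are available
// here; the actual table setup happens during CPU backend initialization, not in this file:
//
//     for (uint32_t i = 0; i < (1u << 16); ++i) {
//         const float f = GGML_CPU_FP16_TO_FP32((ggml_fp16_t) i);
//         ggml_table_gelu_f16[i]       = GGML_CPU_FP32_TO_FP16(ggml_gelu_f32(f));
//         ggml_table_gelu_quick_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_quick_f32(f));
//     }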

void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * GGML_RESTRICT x, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc) {
    assert(nrc == 1);
    GGML_UNUSED(nrc);
    GGML_UNUSED(bx);
    GGML_UNUSED(by);
    GGML_UNUSED(bs);

#if defined(GGML_SIMD)
    float sumf = 0.0f;

#if defined(__ARM_FEATURE_SVE)
    const int sve_register_length = ggml_cpu_get_sve_cnt() * 8;
    const int ggml_f32_epr  = sve_register_length / 32; // elements per register: SVE128:4, SVE256:8, SVE512:16
    const int ggml_f32_step = 8 * ggml_f32_epr;          // unroll over 8 SVE registers

    const int np = (n & ~(ggml_f32_step - 1));

    svfloat32_t sum1 = svdup_n_f32(0.0f);
    svfloat32_t sum2 = svdup_n_f32(0.0f);
    svfloat32_t sum3 = svdup_n_f32(0.0f);
    svfloat32_t sum4 = svdup_n_f32(0.0f);
    svfloat32_t sum5 = svdup_n_f32(0.0f);
    svfloat32_t sum6 = svdup_n_f32(0.0f);
    svfloat32_t sum7 = svdup_n_f32(0.0f);
    svfloat32_t sum8 = svdup_n_f32(0.0f);

    svfloat32_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8;
    svfloat32_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8;

    for (int i = 0; i < np; i += ggml_f32_step) {
        ax1  = GGML_F32_VEC_LOAD(x + i);
        ay1  = GGML_F32_VEC_LOAD(y + i);
        sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1);

        ax2  = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr);
        ay2  = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr);
        sum2 = GGML_F32_VEC_FMA(sum2, ax2, ay2);

        ax3  = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr);
        ay3  = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr);
        sum3 = GGML_F32_VEC_FMA(sum3, ax3, ay3);

        ax4  = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr);
        ay4  = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr);
        sum4 = GGML_F32_VEC_FMA(sum4, ax4, ay4);

        ax5  = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr);
        ay5  = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr);
        sum5 = GGML_F32_VEC_FMA(sum5, ax5, ay5);

        ax6  = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr);
        ay6  = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr);
        sum6 = GGML_F32_VEC_FMA(sum6, ax6, ay6);

        ax7  = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr);
        ay7  = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr);
        sum7 = GGML_F32_VEC_FMA(sum7, ax7, ay7);

        ax8  = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr);
        ay8  = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr);
        sum8 = GGML_F32_VEC_FMA(sum8, ax8, ay8);
    }

    // leftovers
    // since the loop above is unrolled 8x, the leftovers lie in [0, ggml_f32_step) and are handled by the loops below
    const int np2 = (n & ~(ggml_f32_epr - 1));
    for (int i = np; i < np2; i += ggml_f32_epr) {
        ax1  = GGML_F32_VEC_LOAD(x + i);
        ay1  = GGML_F32_VEC_LOAD(y + i);
        sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1);
    }

    // at most ggml_f32_epr - 1 elements remain; apply a predicated svmad to the available elements only
    if (np2 < n) {
        svbool_t pg = svwhilelt_b32(np2, n);
        ax1  = svld1_f32(pg, x + np2);
        ay1  = svld1_f32(pg, y + np2);
        sum1 = svmad_f32_m(pg, ax1, ay1, sum1);
    }

    // reduce sum1..sum8 into sumf
    GGML_F32_VEC_REDUCE(sumf, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8);
#elif defined(__riscv_v_intrinsic)
    vfloat32m1_t vsum = __riscv_vfmv_v_f_f32m1(0.0f, 1);
    for (int i = 0, avl; i < n; i += avl) {
        avl = __riscv_vsetvl_e32m8(n - i);
        vfloat32m8_t ax = __riscv_vle32_v_f32m8(&x[i], avl);
        vfloat32m8_t ay = __riscv_vle32_v_f32m8(&y[i], avl);
        vfloat32m8_t prod = __riscv_vfmul_vv_f32m8(ax, ay, avl);
        vsum = __riscv_vfredusum_vs_f32m8_f32m1(prod, vsum, avl);
    }
    sumf += __riscv_vfmv_f_s_f32m1_f32(vsum);
#else
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce the partial sums into sumf
    GGML_F32_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#endif
#else
    // scalar
    ggml_float sumf = 0.0;
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }
#endif

    *s = sumf;
}
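
// Usage sketch (illustrative): for a plain dot product of two contiguous rows, nrc must be 1 and
// the stride arguments are ignored; row_a and row_b below are hypothetical float arrays of length n:
//
//     float dot = 0.0f;
//     ggml_vec_dot_f32(n, &dot, 0, row_a, 0, row_b, 0, 1);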

void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc) {
    assert(nrc == 1);
    GGML_UNUSED(nrc);
    GGML_UNUSED(bx);
    GGML_UNUSED(by);
    GGML_UNUSED(bs);

    int i = 0;
    ggml_float sumf = 0;

#if defined(__AVX512BF16__)
    __m512 c1 = _mm512_setzero_ps();
    __m512 c2 = _mm512_setzero_ps();
    for (; i + 64 <= n; i += 64) {
        c1 = _mm512_dpbf16_ps(c1, m512bh(_mm512_loadu_si512((x + i))),
                              m512bh(_mm512_loadu_si512((y + i))));
        c2 = _mm512_dpbf16_ps(c2, m512bh(_mm512_loadu_si512((x + i + 32))),
                              m512bh(_mm512_loadu_si512((y + i + 32))));
    }
    sumf += (ggml_float)_mm512_reduce_add_ps(c1);
    sumf += (ggml_float)_mm512_reduce_add_ps(c2);

#elif defined(__AVX512F__)
#define LOAD(p) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)(p))), 16))
    __m512 c1 = _mm512_setzero_ps();
    __m512 c2 = _mm512_setzero_ps();
    for (; i + 32 <= n; i += 32) {
        c1 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
        c2 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c2);
    }
    sumf += (ggml_float)_mm512_reduce_add_ps(c1);
    sumf += (ggml_float)_mm512_reduce_add_ps(c2);
#undef LOAD

#elif defined(__AVX2__) || defined(__AVX__)
#if defined(__AVX2__)
#define LOAD(p) _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16))
#else
#define LOAD(p) _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16)), (_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_bsrli_si128(_mm_loadu_si128((const __m128i *)(p)), 8)), 16)), 1))
#endif
    __m256 c1 = _mm256_setzero_ps();
    __m256 c2 = _mm256_setzero_ps();
    __m256 c3 = _mm256_setzero_ps();
    __m256 c4 = _mm256_setzero_ps();
    for (; i + 32 <= n; i += 32) {
        c1 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
        c2 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 8), LOAD(y + i + 8)), c2);
        c3 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c3);
        c4 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 24), LOAD(y + i + 24)), c4);
    }

    __m128 g;
    c1 = _mm256_add_ps(_mm256_add_ps(c1, c3),
                       _mm256_add_ps(c2, c4));
    g = _mm_add_ps(_mm256_extractf128_ps(c1, 1),
                   _mm256_castps256_ps128(c1));
    g = _mm_add_ps(g, _mm_movehl_ps(g, g));
    g = _mm_add_ss(g, _mm_movehdup_ps(g));
    sumf += (ggml_float)_mm_cvtss_f32(g);
#undef LOAD
#endif

    for (; i < n; ++i) {
        sumf += (ggml_float)(GGML_BF16_TO_FP32(x[i]) *
                             GGML_BF16_TO_FP32(y[i]));
    }
    *s = sumf;
}
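
// The LOAD macros above rely on bf16 being the upper half of an IEEE-754 binary32: widening each
// 16-bit value and shifting it left by 16 reproduces the original f32 exactly. Scalar equivalent,
// as a sketch assuming ggml_bf16_t exposes its raw bits in a .bits field (which is what
// GGML_BF16_TO_FP32 operates on):
//
//     uint32_t u = (uint32_t) x[i].bits << 16;
//     float    f;
//     memcpy(&f, &u, sizeof(f));   // f == GGML_BF16_TO_FP32(x[i])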

void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx, ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc) {
    assert(nrc == 1);
    GGML_UNUSED(nrc);
    GGML_UNUSED(bx);
    GGML_UNUSED(by);
    GGML_UNUSED(bs);

    ggml_float sumf = 0.0;

#if defined(GGML_SIMD) && !defined(__riscv_v_intrinsic)
#if defined(__ARM_FEATURE_SVE)
    const int sve_register_length = svcntb() * 8;       // vector length in bits
    const int ggml_f16_epr  = sve_register_length / 16; // f16 elements per register
    const int ggml_f16_step = 8 * ggml_f16_epr;         // unroll over 8 SVE registers

    const int np = (n & ~(ggml_f16_step - 1));

    svfloat16_t sum1 = svdup_n_f16(0.0f);
    svfloat16_t sum2 = svdup_n_f16(0.0f);
    svfloat16_t sum3 = svdup_n_f16(0.0f);
    svfloat16_t sum4 = svdup_n_f16(0.0f);

    svfloat16_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8;
    svfloat16_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8;

    for (int i = 0; i < np; i += ggml_f16_step) {
        ax1  = GGML_F16x_VEC_LOAD(x + i + 0 * ggml_f16_epr, 0);
        ay1  = GGML_F16x_VEC_LOAD(y + i + 0 * ggml_f16_epr, 0);
        sum1 = GGML_F16x_VEC_FMA(sum1, ax1, ay1);

        ax2  = GGML_F16x_VEC_LOAD(x + i + 1 * ggml_f16_epr, 1);
        ay2  = GGML_F16x_VEC_LOAD(y + i + 1 * ggml_f16_epr, 1);
        sum2 = GGML_F16x_VEC_FMA(sum2, ax2, ay2);

        ax3  = GGML_F16x_VEC_LOAD(x + i + 2 * ggml_f16_epr, 2);
        ay3  = GGML_F16x_VEC_LOAD(y + i + 2 * ggml_f16_epr, 2);
        sum3 = GGML_F16x_VEC_FMA(sum3, ax3, ay3);

        ax4  = GGML_F16x_VEC_LOAD(x + i + 3 * ggml_f16_epr, 3);
        ay4  = GGML_F16x_VEC_LOAD(y + i + 3 * ggml_f16_epr, 3);
        sum4 = GGML_F16x_VEC_FMA(sum4, ax4, ay4);

        // the remaining 4 register pairs are accumulated back into sum1..sum4
        ax5  = GGML_F16x_VEC_LOAD(x + i + 4 * ggml_f16_epr, 4);
        ay5  = GGML_F16x_VEC_LOAD(y + i + 4 * ggml_f16_epr, 4);
        sum1 = GGML_F16x_VEC_FMA(sum1, ax5, ay5);

        ax6  = GGML_F16x_VEC_LOAD(x + i + 5 * ggml_f16_epr, 5);
        ay6  = GGML_F16x_VEC_LOAD(y + i + 5 * ggml_f16_epr, 5);
        sum2 = GGML_F16x_VEC_FMA(sum2, ax6, ay6);

        ax7  = GGML_F16x_VEC_LOAD(x + i + 6 * ggml_f16_epr, 6);
        ay7  = GGML_F16x_VEC_LOAD(y + i + 6 * ggml_f16_epr, 6);
        sum3 = GGML_F16x_VEC_FMA(sum3, ax7, ay7);

        ax8  = GGML_F16x_VEC_LOAD(x + i + 7 * ggml_f16_epr, 7);
        ay8  = GGML_F16x_VEC_LOAD(y + i + 7 * ggml_f16_epr, 7);
        sum4 = GGML_F16x_VEC_FMA(sum4, ax8, ay8);
    }

    const int np2 = (n & ~(ggml_f16_epr - 1)); // round down to a multiple of ggml_f16_epr
    for (int k = np; k < np2; k += ggml_f16_epr) {
        svfloat16_t rx = GGML_F16x_VEC_LOAD(x + k, 0);
        svfloat16_t ry = GGML_F16x_VEC_LOAD(y + k, 0);
        sum1 = GGML_F16x_VEC_FMA(sum1, rx, ry);
    }

    if (np2 < n) {
        svbool_t pg = svwhilelt_b16(np2, n);
        svfloat16_t hx = svld1_f16(pg, (const __fp16 *)(x + np2));
        svfloat16_t hy = svld1_f16(pg, (const __fp16 *)(y + np2));
        sum1 = svmad_f16_x(pg, hx, hy, sum1);
    }

    GGML_F16x_VEC_REDUCE(sumf, sum1, sum2, sum3, sum4);
#else
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce the partial sums into sumf
    GGML_F16_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i]));
    }

    // if you hit this, you are likely running outside the FP range
    assert(!isnan(sumf) && !isinf(sumf));
#endif
#else
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i]));
    }
#endif

    *s = sumf;
}
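
// Call convention matches ggml_vec_dot_f32: nrc must be 1 and the stride arguments are ignored.
// Illustrative call, where row_a_f16 and row_b_f16 are hypothetical ggml_fp16_t arrays of length n:
//
//     float dot = 0.0f;
//     ggml_vec_dot_f16(n, &dot, 0, row_a_f16, 0, row_b_f16, 0, 1);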

void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    int i = 0;
#if defined(__AVX512F__) && defined(__AVX512DQ__)
    for (; i + 15 < n; i += 16) {
        _mm512_storeu_ps(y + i, ggml_v_silu(_mm512_loadu_ps(x + i)));
    }
#elif defined(__AVX2__) && defined(__FMA__)
    for (; i + 7 < n; i += 8) {
        _mm256_storeu_ps(y + i, ggml_v_silu(_mm256_loadu_ps(x + i)));
    }
#elif defined(__SSE2__)
    for (; i + 3 < n; i += 4) {
        _mm_storeu_ps(y + i, ggml_v_silu(_mm_loadu_ps(x + i)));
    }
#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__)
    const int vlen = svcntw();
    for (; i < n; i += vlen) {
        const svbool_t pg = svwhilelt_b32_s32(i, n);
        svst1_f32(pg, y + i, ggml_v_silu(pg, svld1_f32(pg, x + i)));
    }
#elif defined(__ARM_NEON) && defined(__aarch64__)
    for (; i + 3 < n; i += 4) {
        vst1q_f32(y + i, ggml_v_silu(vld1q_f32(x + i)));
    }
#endif
    for (; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]);
    }
}
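
// Each SIMD lane computes the same thing as the scalar fallback, i.e. silu(x) = x * sigmoid(x):
//
//     y[i] = x[i] / (1.0f + expf(-x[i]));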

void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g) {
    int i = 0;
#if defined(__AVX512F__) && defined(__AVX512DQ__)
    for (; i + 15 < n; i += 16) {
        _mm512_storeu_ps(y + i, _mm512_mul_ps(ggml_v_silu(_mm512_loadu_ps(x + i)), _mm512_loadu_ps(g + i)));
    }
#elif defined(__AVX2__) && defined(__FMA__)
    for (; i + 7 < n; i += 8) {
        _mm256_storeu_ps(y + i, _mm256_mul_ps(ggml_v_silu(_mm256_loadu_ps(x + i)), _mm256_loadu_ps(g + i)));
    }
#elif defined(__SSE2__)
    for (; i + 3 < n; i += 4) {
        _mm_storeu_ps(y + i, _mm_mul_ps(ggml_v_silu(_mm_loadu_ps(x + i)), _mm_loadu_ps(g + i)));
    }
#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__)
    const int vlen = svcntw();
    for (; i < n; i += vlen) {
        const svbool_t pg = svwhilelt_b32_s32(i, n);
        svst1_f32(pg, y + i, svmul_f32_x(pg, ggml_v_silu(pg, svld1_f32(pg, x + i)), svld1_f32(pg, g + i)));
    }
#elif defined(__ARM_NEON) && defined(__aarch64__)
    for (; i + 3 < n; i += 4) {
        vst1q_f32(y + i, vmulq_f32(ggml_v_silu(vld1q_f32(x + i)), vld1q_f32(g + i)));
    }
#endif
    for (; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]) * g[i];
    }
}
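
// SwiGLU gating: each output element combines the SiLU of the activation x with the gate value g,
// i.e. expanded from the scalar fallback above:
//
//     y[i] = (x[i] / (1.0f + expf(-x[i]))) * g[i];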

ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max) {
    int i = 0;
    ggml_float sum = 0;
#if defined(__AVX512F__) && defined(__AVX512DQ__)
    for (; i + 15 < n; i += 16) {
        __m512 val = ggml_v_expf(_mm512_sub_ps(_mm512_loadu_ps(x + i),
                                               _mm512_set1_ps(max)));
        _mm512_storeu_ps(y + i, val);
        sum += (ggml_float)_mm512_reduce_add_ps(val);
    }
#elif defined(__AVX2__) && defined(__FMA__)
    for (; i + 7 < n; i += 8) {
        __m256 val = ggml_v_expf(_mm256_sub_ps(_mm256_loadu_ps(x + i),
                                               _mm256_set1_ps(max)));
        _mm256_storeu_ps(y + i, val);
        __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1),
                                 _mm256_castps256_ps128(val));
        val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2));
        val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2));
        sum += (ggml_float)_mm_cvtss_f32(val2);
    }
#elif defined(__SSE2__)
    for (; i + 3 < n; i += 4) {
        __m128 val = ggml_v_expf(_mm_sub_ps(_mm_loadu_ps(x + i),
                                            _mm_set1_ps(max)));
        _mm_storeu_ps(y + i, val);
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
        val = _mm_add_ps(val, _mm_movehl_ps(val, val));
        val = _mm_add_ss(val, _mm_movehdup_ps(val));
#else
        __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1));
        val = _mm_add_ps(val, tmp);
        tmp = _mm_movehl_ps(tmp, val);
        val = _mm_add_ss(val, tmp);
#endif
        sum += (ggml_float)_mm_cvtss_f32(val);
    }
#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__)
    const int vlen = svcntw();
    for (; i < n; i += vlen) {
        const svbool_t pg = svwhilelt_b32_s32(i, n);
        svfloat32_t val = ggml_v_expf(pg, svsub_f32_x(pg, svld1_f32(pg, x + i),
                                                      svdup_n_f32_x(pg, max)));
        svst1_f32(pg, y + i, val);
        sum += (ggml_float)svaddv_f32(pg, val);
    }
#elif defined(__ARM_NEON) && defined(__aarch64__)
    for (; i + 3 < n; i += 4) {
        float32x4_t val = ggml_v_expf(vsubq_f32(vld1q_f32(x + i),
                                                vdupq_n_f32(max)));
        vst1q_f32(y + i, val);
        sum += (ggml_float)vaddvq_f32(val);
    }
#elif defined(__riscv_v_intrinsic)
    vfloat64m1_t vsum = __riscv_vfmv_v_f_f64m1(0, 1);
    for (int avl; i < n; i += avl) {
        avl = __riscv_vsetvl_e32m2(n - i);
        vfloat32m2_t val = ggml_v_expf_m2(__riscv_vfsub_vf_f32m2(__riscv_vle32_v_f32m2(&x[i], avl), max, avl), avl);
        __riscv_vse32_v_f32m2(&y[i], val, avl);
        vsum = __riscv_vfwredusum_vs_f32m2_f64m1(val, vsum, avl);
    }
    return (ggml_float)__riscv_vfmv_f_s_f64m1_f64(vsum);
#endif
    for (; i < n; ++i) {
        float val = expf(x[i] - max);
        sum += (ggml_float)val;
        y[i] = val;
    }
    return sum;
}
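
// Usage sketch (illustrative): the function writes the un-normalized terms exp(x[i] - max) into y
// and returns their sum; the caller finishes the softmax by scaling with the reciprocal. max_val
// below is assumed to be the row maximum, computed beforehand (e.g. with ggml_vec_max_f32):
//
//     const ggml_float sum = ggml_vec_soft_max_f32(n, y, x, max_val);
//     const float inv_sum  = 1.0f / (float) sum;
//     for (int i = 0; i < n; ++i) {
//         y[i] *= inv_sum;
//     }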

ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, float max) {
    // log(soft_max_i) = log(exp(logit_i - max) / soft_max_sum) = (logit_i - max) - log(soft_max_sum)

    int i = 0;
    ggml_float sum = 0;
    for (; i < n; ++i) {
        float val = x[i] - max;
        y[i] = val;
        sum += (ggml_float)expf(val);
    }
    return (ggml_float)logf(sum);
}
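
// Usage sketch (illustrative): y receives the shifted logits x[i] - max and the return value is
// log(sum_j exp(x[j] - max)); the caller obtains the log-softmax by subtracting it. max_val below
// is assumed to be the row maximum:
//
//     const ggml_float log_sum = ggml_vec_log_soft_max_f32(n, y, x, max_val);
//     for (int i = 0; i < n; ++i) {
//         y[i] -= (float) log_sum;
//     }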