ggml-impl.h

#pragma once

#include "ggml.h"

// GGML internal header

#include <assert.h>
#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
#include <stddef.h>
#include <stdbool.h>
#include <string.h> // memcpy
#include <math.h>   // fabsf

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
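// NOTE: like any function-like macro, MIN/MAX evaluate their arguments more than once,
// so arguments with side effects (e.g. MIN(i++, n)) must be avoided.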
#ifdef __cplusplus
extern "C" {
#endif

// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef __cplusplus
#ifndef static_assert
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
#define static_assert(cond, msg) _Static_assert(cond, msg)
#else
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
#endif
#endif

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#if defined(__ARM_NEON)

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#ifdef _MSC_VER

typedef uint16_t ggml_fp16_internal_t;

#define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }

#else

typedef __fp16 ggml_fp16_internal_t;

#define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }

#endif // _MSC_VER
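// Both forms of ggml_vld1q_u32 build a uint32x4_t initializer from the lanes {w, x, y, z};
// the MSVC variant packs the lanes into two 64-bit halves, on the assumption that MSVC's
// NEON vector types cannot be brace-initialized one 32-bit lane at a time.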
#if !defined(__aarch64__)

// 32-bit ARM compatibility

// vaddvq_s16
// vpaddq_s16
// vpaddq_s32
// vaddvq_s32
// vaddvq_f32
// vmaxvq_f32
// vcvtnq_s32_f32
// vzip1_u8
// vzip2_u8

inline static int32_t vaddvq_s16(int16x8_t v) {
    return
        (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
        (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
        (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
        (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
}

inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
    int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
    int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
    return vcombine_s16(a0, b0);
}

inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
    int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
    int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
    return vcombine_s32(a0, b0);
}

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}

inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[0]; res[1] = b[0];
    res[2] = a[1]; res[3] = b[1];
    res[4] = a[2]; res[5] = b[2];
    res[6] = a[3]; res[7] = b[3];

    return res;
}

inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[4]; res[1] = b[4];
    res[2] = a[5]; res[3] = b[5];
    res[4] = a[6]; res[5] = b[6];
    res[6] = a[7]; res[7] = b[7];

    return res;
}
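// The multi-register load intrinsics listed below (vld1q_s16_x2, vld1q_u8_x2/_x4,
// vld1q_s8_x2/_x4) are missing from some 32-bit toolchains; the ggml_* wrappers
// emulate them with consecutive single-register loads.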
// vld1q_s16_x2
// vld1q_u8_x2
// vld1q_u8_x4
// vld1q_s8_x2
// vld1q_s8_x4

// TODO: double-check these work correctly

typedef struct ggml_int16x8x2_t {
    int16x8_t val[2];
} ggml_int16x8x2_t;

inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
    ggml_int16x8x2_t res;

    res.val[0] = vld1q_s16(ptr + 0);
    res.val[1] = vld1q_s16(ptr + 8);

    return res;
}

typedef struct ggml_uint8x16x2_t {
    uint8x16_t val[2];
} ggml_uint8x16x2_t;

inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
    ggml_uint8x16x2_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);

    return res;
}

typedef struct ggml_uint8x16x4_t {
    uint8x16_t val[4];
} ggml_uint8x16x4_t;

inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
    ggml_uint8x16x4_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);
    res.val[2] = vld1q_u8(ptr + 32);
    res.val[3] = vld1q_u8(ptr + 48);

    return res;
}

typedef struct ggml_int8x16x2_t {
    int8x16_t val[2];
} ggml_int8x16x2_t;

inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
    ggml_int8x16x2_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);

    return res;
}

typedef struct ggml_int8x16x4_t {
    int8x16_t val[4];
} ggml_int8x16x4_t;

inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
    ggml_int8x16x4_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);
    res.val[2] = vld1q_s8(ptr + 32);
    res.val[3] = vld1q_s8(ptr + 48);

    return res;
}

// NOTE: not tested
inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) {
    int8x16_t res;

    res[ 0] = a[b[ 0]];
    res[ 1] = a[b[ 1]];
    res[ 2] = a[b[ 2]];
    res[ 3] = a[b[ 3]];
    res[ 4] = a[b[ 4]];
    res[ 5] = a[b[ 5]];
    res[ 6] = a[b[ 6]];
    res[ 7] = a[b[ 7]];
    res[ 8] = a[b[ 8]];
    res[ 9] = a[b[ 9]];
    res[10] = a[b[10]];
    res[11] = a[b[11]];
    res[12] = a[b[12]];
    res[13] = a[b[13]];
    res[14] = a[b[14]];
    res[15] = a[b[15]];

    return res;
}

// NOTE: not tested
inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
    uint8x16_t res;

    res[ 0] = a[b[ 0]];
    res[ 1] = a[b[ 1]];
    res[ 2] = a[b[ 2]];
    res[ 3] = a[b[ 3]];
    res[ 4] = a[b[ 4]];
    res[ 5] = a[b[ 5]];
    res[ 6] = a[b[ 6]];
    res[ 7] = a[b[ 7]];
    res[ 8] = a[b[ 8]];
    res[ 9] = a[b[ 9]];
    res[10] = a[b[10]];
    res[11] = a[b[11]];
    res[12] = a[b[12]];
    res[13] = a[b[13]];
    res[14] = a[b[14]];
    res[15] = a[b[15]];

    return res;
}

#else

#define ggml_int16x8x2_t  int16x8x2_t
#define ggml_uint8x16x2_t uint8x16x2_t
#define ggml_uint8x16x4_t uint8x16x4_t
#define ggml_int8x16x2_t  int8x16x2_t
#define ggml_int8x16x4_t  int8x16x4_t

#define ggml_vld1q_s16_x2 vld1q_s16_x2
#define ggml_vld1q_u8_x2  vld1q_u8_x2
#define ggml_vld1q_u8_x4  vld1q_u8_x4
#define ggml_vld1q_s8_x2  vld1q_s8_x2
#define ggml_vld1q_s8_x4  vld1q_s8_x4
#define ggml_vqtbl1q_s8   vqtbl1q_s8
#define ggml_vqtbl1q_u8   vqtbl1q_u8

#endif // !defined(__aarch64__)
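// ggml_vdotq_s32(acc, a, b) accumulates the products of the int8 lanes of a and b into the
// int32 lanes of acc. With the dotprod extension this maps directly to vdotq_s32; the
// fallback below uses widening multiplies and pairwise adds, which distributes the products
// across the four lanes differently but yields the same horizontal sum.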
#if !defined(__ARM_FEATURE_DOTPROD)

inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
    const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
    const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));

    return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
}

#else

#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)

#endif // !defined(__ARM_FEATURE_DOTPROD)

#endif // defined(__ARM_NEON)
#if defined(__ARM_NEON) && !defined(_MSC_VER)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    ggml_fp16_internal_t tmp;
    memcpy(&tmp, &h, sizeof(ggml_fp16_t));
    return (float)tmp;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    ggml_fp16_t res;
    ggml_fp16_internal_t tmp = f;
    memcpy(&res, &tmp, sizeof(ggml_fp16_t));
    return res;
}
#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) || defined(__SSE__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif
#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}
#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}
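// Worked example for orientation (standard IEEE binary16/binary32 values):
//   ggml_compute_fp16_to_fp32(0x3C00)   == 1.0f     (fp32 bits 0x3F800000)
//   ggml_compute_fp32_to_fp16(1.0f)     == 0x3C00
//   ggml_compute_fp32_to_fp16(65536.0f) == 0x7C00   (+inf; the largest finite fp16 value is 65504)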
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // defined(__ARM_NEON) && !defined(_MSC_VER)

// precomputed f32 table for f16 (256 KB)
// defined in ggml.c, initialized in ggml_init()
extern float ggml_table_f32_f16[1 << 16];
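// Each of the 65536 entries holds the fp32 value of the fp16 bit pattern used as the index,
// so a conversion becomes a single table load instead of a bit-manipulation sequence.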
// On ARM NEON, it is faster to convert the value directly than to call into
// ggml_lookup_fp16_to_fp32, so GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 are defined
// above for NEON. The same is true for POWER9.
#if !defined(GGML_FP16_TO_FP32)
inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return ggml_table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#endif

#if !defined(GGML_FP32_TO_FP16)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
#endif
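// Note the asymmetry of the fallbacks: fp16 -> fp32 goes through the 64K-entry lookup
// table, while fp32 -> fp16 is always computed, since a table indexed by 32-bit floats
// would be impractically large.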
#define GGML_HASHTABLE_FULL           ((size_t)-1)
#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)

struct ggml_hash_set ggml_hash_set_new(size_t size);

bool   ggml_hash_contains      (const struct ggml_hash_set hash_set, struct ggml_tensor * key);

// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
size_t ggml_hash_find          (const struct ggml_hash_set hash_set, struct ggml_tensor * key);

// returns GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
size_t ggml_hash_insert        (      struct ggml_hash_set hash_set, struct ggml_tensor * key);

// return index, asserts if table is full
size_t ggml_hash_find_or_insert(      struct ggml_hash_set hash_set, struct ggml_tensor * key);
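// Minimal usage sketch of the declarations above (illustrative only; `nodes` and `n_nodes`
// are hypothetical caller data, not part of this API):
//
//   struct ggml_hash_set visited = ggml_hash_set_new(n_nodes);
//   for (int i = 0; i < n_nodes; ++i) {
//       if (!ggml_hash_contains(visited, nodes[i])) {
//           size_t slot = ggml_hash_insert(visited, nodes[i]); // asserts if the table is full
//           // `slot` is a stable index for nodes[i] and can address a parallel side array
//       }
//   }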
#ifdef __cplusplus
}
#endif