// binary-ops.cpp

#include "binary-ops.h"

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#include <type_traits> // for std::is_same_v, used by the Accelerate fast path below

using vDSP_fn_t = void (*)(const float *, vDSP_Stride, const float *, vDSP_Stride, float *, vDSP_Stride, vDSP_Length);
#endif

static inline float op_add(float a, float b) {
    return a + b;
}

static inline float op_sub(float a, float b) {
    return a - b;
}

static inline float op_mul(float a, float b) {
    return a * b;
}

static inline float op_div(float a, float b) {
    return a / b;
}
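
// Element-wise kernel for the case where both operands are contiguous: each input
// is converted to f32, the scalar op is applied, and the result is converted back
// to the destination type via the type_conversion_table traits.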
template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
static inline void vec_binary_op_contiguous(const int64_t n, dst_t * z, const src0_t * x, const src1_t * y) {
    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
    constexpr auto src1_to_f32 = type_conversion_table<src1_t>::to_f32;
    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;

    for (int i = 0; i < n; i++) {
        z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(y[i])));
    }
}
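
// Same as above, but src1 is read with an explicit byte stride (nb10) and wraps
// around every ne10 elements, so a shorter src1 row can be broadcast across the
// src0/dst row.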
template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
static inline void vec_binary_op_non_contiguous(const int64_t n, const int64_t ne10, const int64_t nb10, dst_t * z, const src0_t * x, const src1_t * y) {
    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
    constexpr auto src1_to_f32 = type_conversion_table<src1_t>::to_f32;
    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;

    for (int i = 0; i < n; i++) {
        int i10 = i % ne10;

        const src1_t * y_ptr = (const src1_t *)((const char *)y + i10*nb10);
        z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(*y_ptr)));
    }
}
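
// Applies the scalar op row by row over the rows [ir0, ir1) assigned to this thread.
// src1 may be broadcast across src0/dst (ggml repeat semantics); on Apple platforms
// the all-f32 case is routed through the matching vDSP kernel when one is available.
//
// Broadcast example (illustrative shapes, not from the original file):
//   src0 ne = [4096, 32, 1, 1], src1 ne = [4096, 1, 1, 1]  ->  i11 = i01 % 1 = 0,
//   so every src0/dst row is combined with the single src1 row;
//   src0 ne = [4096, 32, 1, 1], src1 ne = [1, 32, 1, 1]    ->  nr0 = ne00/ne10 = 4096,
//   so each element of a row is combined with that row's single src1 value.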
template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
static void apply_binary_op(const ggml_compute_params * params, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(dst_t));
    GGML_ASSERT(nb00 == sizeof(src0_t));

    const auto [ir0, ir1] = get_thread_range(params, src0);
    const bool is_src1_contiguous = (nb10 == sizeof(src1_t));

    if (!is_src1_contiguous) { // broadcast not implemented yet for non-contiguous
        GGML_ASSERT(ggml_are_same_shape(src0, src1));
    }
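
    // Resolve the Accelerate (vDSP) routine once, outside the row loop, so the hot
    // path only has to check a single function pointer. vDSP_vsub/vDSP_vdiv expect
    // the subtrahend/divisor as their first argument, which is why src1_ptr is
    // passed first in the vDSP_op call below.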
#ifdef GGML_USE_ACCELERATE
    vDSP_fn_t vDSP_op = nullptr;
    // TODO - avoid the f32-only check using type 'trait' lookup tables and row-based src-to-float conversion functions
    if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
        if (op == op_add) {
            vDSP_op = vDSP_vadd;
        } else if (op == op_sub) {
            vDSP_op = vDSP_vsub;
        } else if (op == op_mul) {
            vDSP_op = vDSP_vmul;
        } else if (op == op_div) {
            vDSP_op = vDSP_vdiv;
        }
    }
#endif
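
    // Decompose the flat row index ir into (i01, i02, i03) for src0/dst, then map to
    // src1 indices with a modulo so that src1 repeats along dims 1-3 when it is
    // smaller than src0.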
    for (int64_t ir = ir0; ir < ir1; ++ir) {
        const int64_t i03 = ir/(ne02*ne01);
        const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
        const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

        const int64_t i13 = i03 % ne13;
        const int64_t i12 = i02 % ne12;
        const int64_t i11 = i01 % ne11;

        dst_t        * dst_ptr  = (dst_t        *) ((char       *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
        const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
        const src1_t * src1_ptr = (const src1_t *) ((const char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

        if (is_src1_contiguous) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t nr0 = ne00 / ne10;

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                if constexpr (std::is_same_v<src0_t, float> && std::is_same_v<src1_t, float> && std::is_same_v<dst_t, float>) {
                    if (vDSP_op != nullptr) {
                        vDSP_op(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
                        continue;
                    }
                }
#endif
                vec_binary_op_contiguous<op>(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
            }
        } else {
            vec_binary_op_non_contiguous<op>(ne0, ne10, nb10, dst_ptr, src0_ptr, src1_ptr);
        }
    }
}
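
// Dispatch on the (src0, src1, dst) type combination and instantiate apply_binary_op
// with the matching concrete types; unsupported combinations abort.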
// TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates
template <float (*op)(float, float)>
static void binary_op(const ggml_compute_params * params, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    /*  */ if (src0->type == GGML_TYPE_F32  && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32)  { // all f32
        apply_binary_op<op, float, float, float>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && src1->type == GGML_TYPE_F16  && dst->type == GGML_TYPE_F16)  { // all f16
        apply_binary_op<op, ggml_fp16_t, ggml_fp16_t, ggml_fp16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
        apply_binary_op<op, ggml_bf16_t, ggml_bf16_t, ggml_bf16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_BF16) {
        apply_binary_op<op, ggml_bf16_t, float, ggml_bf16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32)  {
        apply_binary_op<op, ggml_bf16_t, float, float>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F16)  {
        apply_binary_op<op, ggml_fp16_t, float, ggml_fp16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32)  {
        apply_binary_op<op, ggml_fp16_t, float, float>(params, dst);
    } else {
        GGML_ABORT("%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
            ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type));
    }
}
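
// Public entry points: one thin wrapper per binary op, each forwarding to the
// type-dispatching binary_op<> template above.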
void ggml_compute_forward_add_non_quantized(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_add>(params, dst);
}

void ggml_compute_forward_sub(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_sub>(params, dst);
}

void ggml_compute_forward_mul(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_mul>(params, dst);
}

void ggml_compute_forward_div(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_div>(params, dst);
}